2024-11-11 21:43:46,432 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-11-11 21:43:46,448 main DEBUG Took 0.013318 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-11 21:43:46,448 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-11 21:43:46,449 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-11 21:43:46,450 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-11 21:43:46,452 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 21:43:46,461 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-11 21:43:46,478 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 21:43:46,480 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 21:43:46,481 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 21:43:46,482 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 21:43:46,483 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 21:43:46,483 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 21:43:46,485 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 21:43:46,485 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 21:43:46,486 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 21:43:46,487 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 21:43:46,488 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 21:43:46,488 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 21:43:46,489 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 21:43:46,490 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 21:43:46,491 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 21:43:46,491 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 21:43:46,492 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 21:43:46,492 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 21:43:46,494 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 21:43:46,494 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 21:43:46,495 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 21:43:46,495 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 21:43:46,496 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 21:43:46,497 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 21:43:46,497 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 21:43:46,498 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-11 21:43:46,500 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 21:43:46,502 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-11 21:43:46,504 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-11 21:43:46,505 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-11 21:43:46,507 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-11 21:43:46,508 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-11 21:43:46,520 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-11 21:43:46,525 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-11 21:43:46,527 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-11 21:43:46,528 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-11 21:43:46,529 main DEBUG createAppenders(={Console})
2024-11-11 21:43:46,530 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized
2024-11-11 21:43:46,530 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-11-11 21:43:46,530 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK.
2024-11-11 21:43:46,531 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-11 21:43:46,532 main DEBUG OutputStream closed
2024-11-11 21:43:46,532 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-11 21:43:46,532 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-11 21:43:46,533 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK
2024-11-11 21:43:46,637 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-11 21:43:46,640 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-11 21:43:46,642 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-11 21:43:46,644 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-11 21:43:46,645 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-11 21:43:46,645 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-11 21:43:46,646 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-11 21:43:46,646 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-11 21:43:46,647 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-11 21:43:46,647 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-11 21:43:46,648 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-11 21:43:46,648 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-11 21:43:46,649 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-11 21:43:46,649 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-11 21:43:46,650 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-11 21:43:46,650 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-11 21:43:46,651 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-11 21:43:46,652 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-11 21:43:46,656 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-11 21:43:46,656 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null
2024-11-11 21:43:46,657 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-11 21:43:46,658 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK.
2024-11-11T21:43:46,990 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e
2024-11-11 21:43:46,995 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-11 21:43:46,995 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-11T21:43:47,008 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins
2024-11-11T21:43:47,061 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=402, ProcessCount=11, AvailableMemoryMB=5695
2024-11-11T21:43:47,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-11T21:43:47,086 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/cluster_0e89ae30-ccf9-d539-b09d-bd8a9a75d5c5, deleteOnExit=true
2024-11-11T21:43:47,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-11T21:43:47,089 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/test.cache.data in system properties and HBase conf
2024-11-11T21:43:47,090 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/hadoop.tmp.dir in system properties and HBase conf
2024-11-11T21:43:47,091 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/hadoop.log.dir in system properties and HBase conf
2024-11-11T21:43:47,092 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-11T21:43:47,094 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-11T21:43:47,094 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-11T21:43:47,189 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-11T21:43:47,291 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-11T21:43:47,295 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-11T21:43:47,296 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-11T21:43:47,297 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-11T21:43:47,297 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-11T21:43:47,298 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-11T21:43:47,298 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-11T21:43:47,299 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-11T21:43:47,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-11T21:43:47,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-11T21:43:47,301 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/nfs.dump.dir in system properties and HBase conf
2024-11-11T21:43:47,302 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/java.io.tmpdir in system properties and HBase conf
2024-11-11T21:43:47,302 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-11T21:43:47,303 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-11T21:43:47,304 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-11T21:43:47,861 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-11T21:43:48,203 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-11T21:43:48,294 INFO [Time-limited test {}] log.Log(170): Logging initialized @2692ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-11T21:43:48,371 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T21:43:48,443 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-11T21:43:48,466 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-11T21:43:48,467 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-11T21:43:48,469 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-11T21:43:48,488 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T21:43:48,498 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/hadoop.log.dir/,AVAILABLE}
2024-11-11T21:43:48,499 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-11T21:43:48,730 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/java.io.tmpdir/jetty-localhost-36625-hadoop-hdfs-3_4_1-tests_jar-_-any-5145661228551196402/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-11T21:43:48,739 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:36625}
2024-11-11T21:43:48,739 INFO [Time-limited test {}] server.Server(415): Started @3138ms
2024-11-11T21:43:48,770 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-11T21:43:49,159 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T21:43:49,170 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-11T21:43:49,176 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-11T21:43:49,176 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-11T21:43:49,176 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-11T21:43:49,177 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60d13ec7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/hadoop.log.dir/,AVAILABLE}
2024-11-11T21:43:49,178 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-11T21:43:49,307 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7ca8488f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/java.io.tmpdir/jetty-localhost-38259-hadoop-hdfs-3_4_1-tests_jar-_-any-17033397549532628084/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T21:43:49,308 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@dc1ca4f{HTTP/1.1, (http/1.1)}{localhost:38259}
2024-11-11T21:43:49,309 INFO [Time-limited test {}] server.Server(415): Started @3708ms
2024-11-11T21:43:49,373 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-11T21:43:49,507 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T21:43:49,514 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-11T21:43:49,532 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-11T21:43:49,532 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-11T21:43:49,532 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-11T21:43:49,533 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ec7bf2e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/hadoop.log.dir/,AVAILABLE}
2024-11-11T21:43:49,534 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-11T21:43:49,663 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1ca1952e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/java.io.tmpdir/jetty-localhost-33789-hadoop-hdfs-3_4_1-tests_jar-_-any-13861351294934514071/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T21:43:49,664 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75639b0e{HTTP/1.1, (http/1.1)}{localhost:33789}
2024-11-11T21:43:49,665 INFO [Time-limited test {}] server.Server(415): Started @4064ms
2024-11-11T21:43:49,667 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-11T21:43:49,827 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/cluster_0e89ae30-ccf9-d539-b09d-bd8a9a75d5c5/data/data2/current/BP-580669111-172.17.0.2-1731361427964/current, will proceed with Du for space computation calculation,
2024-11-11T21:43:49,827 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/cluster_0e89ae30-ccf9-d539-b09d-bd8a9a75d5c5/data/data3/current/BP-580669111-172.17.0.2-1731361427964/current, will proceed with Du for space computation calculation,
2024-11-11T21:43:49,827 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/cluster_0e89ae30-ccf9-d539-b09d-bd8a9a75d5c5/data/data1/current/BP-580669111-172.17.0.2-1731361427964/current, will proceed with Du for space computation calculation,
2024-11-11T21:43:49,833 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/cluster_0e89ae30-ccf9-d539-b09d-bd8a9a75d5c5/data/data4/current/BP-580669111-172.17.0.2-1731361427964/current, will proceed with Du for space computation calculation,
2024-11-11T21:43:49,885 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-11T21:43:49,893 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-11T21:43:49,956 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7a34780f6986fa0e with lease ID 0x74883d50e364fbb1: Processing first storage report for DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8 from datanode DatanodeRegistration(127.0.0.1:40991, datanodeUuid=f4f8cb69-1fd7-4c2d-a53b-9e4fb83b2718, infoPort=43441, infoSecurePort=0, ipcPort=46735, storageInfo=lv=-57;cid=testClusterID;nsid=1196742393;c=1731361427964)
2024-11-11T21:43:49,957 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7a34780f6986fa0e with lease ID 0x74883d50e364fbb1: from storage DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8 node DatanodeRegistration(127.0.0.1:40991, datanodeUuid=f4f8cb69-1fd7-4c2d-a53b-9e4fb83b2718, infoPort=43441, infoSecurePort=0, ipcPort=46735, storageInfo=lv=-57;cid=testClusterID;nsid=1196742393;c=1731361427964), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-11-11T21:43:49,958 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x11324adce325ee82 with lease ID 0x74883d50e364fbb2: Processing first storage report for DS-de7b2749-e7b5-4a2d-92d9-e74f901847db from datanode DatanodeRegistration(127.0.0.1:39997, datanodeUuid=aa07b05a-66fd-4af1-ad17-01ade69c1b07, infoPort=45669, infoSecurePort=0, ipcPort=35815, storageInfo=lv=-57;cid=testClusterID;nsid=1196742393;c=1731361427964)
2024-11-11T21:43:49,958 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x11324adce325ee82 with lease ID 0x74883d50e364fbb2: from storage DS-de7b2749-e7b5-4a2d-92d9-e74f901847db node DatanodeRegistration(127.0.0.1:39997, datanodeUuid=aa07b05a-66fd-4af1-ad17-01ade69c1b07, infoPort=45669, infoSecurePort=0, ipcPort=35815, storageInfo=lv=-57;cid=testClusterID;nsid=1196742393;c=1731361427964), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-11T21:43:49,959 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7a34780f6986fa0e with lease ID 0x74883d50e364fbb1: Processing first storage report for DS-7f1d99c9-d099-433e-a76e-f087209d8fc6 from datanode DatanodeRegistration(127.0.0.1:40991, datanodeUuid=f4f8cb69-1fd7-4c2d-a53b-9e4fb83b2718, infoPort=43441, infoSecurePort=0, ipcPort=46735, storageInfo=lv=-57;cid=testClusterID;nsid=1196742393;c=1731361427964)
2024-11-11T21:43:49,959 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7a34780f6986fa0e with lease ID 0x74883d50e364fbb1: from storage DS-7f1d99c9-d099-433e-a76e-f087209d8fc6 node DatanodeRegistration(127.0.0.1:40991, datanodeUuid=f4f8cb69-1fd7-4c2d-a53b-9e4fb83b2718, infoPort=43441, infoSecurePort=0, ipcPort=46735, storageInfo=lv=-57;cid=testClusterID;nsid=1196742393;c=1731361427964), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-11T21:43:49,959 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x11324adce325ee82 with lease ID 0x74883d50e364fbb2: Processing first storage report for DS-4d298aec-16b9-4b97-8292-5cfd4a6ba257 from datanode DatanodeRegistration(127.0.0.1:39997, datanodeUuid=aa07b05a-66fd-4af1-ad17-01ade69c1b07, infoPort=45669, infoSecurePort=0, ipcPort=35815, storageInfo=lv=-57;cid=testClusterID;nsid=1196742393;c=1731361427964)
2024-11-11T21:43:49,960 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x11324adce325ee82 with lease ID 0x74883d50e364fbb2: from storage DS-4d298aec-16b9-4b97-8292-5cfd4a6ba257 node DatanodeRegistration(127.0.0.1:39997, datanodeUuid=aa07b05a-66fd-4af1-ad17-01ade69c1b07, infoPort=45669, infoSecurePort=0, ipcPort=35815, storageInfo=lv=-57;cid=testClusterID;nsid=1196742393;c=1731361427964), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-11T21:43:50,075 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e
2024-11-11T21:43:50,152 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/cluster_0e89ae30-ccf9-d539-b09d-bd8a9a75d5c5/zookeeper_0, clientPort=50357, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/cluster_0e89ae30-ccf9-d539-b09d-bd8a9a75d5c5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/cluster_0e89ae30-ccf9-d539-b09d-bd8a9a75d5c5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-11T21:43:50,162 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50357
2024-11-11T21:43:50,178 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T21:43:50,181 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T21:43:50,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741825_1001 (size=7)
2024-11-11T21:43:50,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741825_1001 (size=7)
2024-11-11T21:43:50,860 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0 with version=8
2024-11-11T21:43:50,860 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/hbase-staging
2024-11-11T21:43:50,974 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-11T21:43:51,216 INFO [Time-limited test {}] client.ConnectionUtils(128): master/22a1f13b90b0:0 server-side Connection retries=45
2024-11-11T21:43:51,226 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-11T21:43:51,227 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-11T21:43:51,233 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-11T21:43:51,234 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-11T21:43:51,234 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-11T21:43:51,391 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-11T21:43:51,467 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-11T21:43:51,479 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-11T21:43:51,484 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-11T21:43:51,517 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 15888 (auto-detected)
2024-11-11T21:43:51,518 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-11T21:43:51,540 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45429
2024-11-11T21:43:51,562 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45429 connecting to ZooKeeper ensemble=127.0.0.1:50357
2024-11-11T21:43:51,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:454290x0, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-11T21:43:51,598 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45429-0x10030be36e10000 connected
2024-11-11T21:43:51,633 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T21:43:51,635 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T21:43:51,648 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-11T21:43:51,652 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0, hbase.cluster.distributed=false
2024-11-11T21:43:51,679 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-11T21:43:51,692 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45429
2024-11-11T21:43:51,693 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45429
2024-11-11T21:43:51,695 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45429
2024-11-11T21:43:51,697 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45429
2024-11-11T21:43:51,697 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45429
2024-11-11T21:43:51,816 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/22a1f13b90b0:0 server-side Connection retries=45
2024-11-11T21:43:51,819 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-11T21:43:51,819 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-11T21:43:51,820 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-11T21:43:51,820 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-11T21:43:51,820 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-11T21:43:51,823 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-11T21:43:51,826 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-11T21:43:51,827 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41845
2024-11-11T21:43:51,831 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41845 connecting to ZooKeeper ensemble=127.0.0.1:50357
2024-11-11T21:43:51,832 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T21:43:51,840 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T21:43:51,851 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:418450x0, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-11T21:43:51,852 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:418450x0, quorum=127.0.0.1:50357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-11T21:43:51,853 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41845-0x10030be36e10001 connected
2024-11-11T21:43:51,857 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-11T21:43:51,865 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-11T21:43:51,868 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41845-0x10030be36e10001, quorum=127.0.0.1:50357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-11T21:43:51,873 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41845-0x10030be36e10001, quorum=127.0.0.1:50357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-11T21:43:51,874 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41845
2024-11-11T21:43:51,875 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41845
2024-11-11T21:43:51,876 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41845
2024-11-11T21:43:51,880 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41845
2024-11-11T21:43:51,880 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41845
2024-11-11T21:43:51,900 DEBUG [M:0;22a1f13b90b0:45429 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;22a1f13b90b0:45429
2024-11-11T21:43:51,901 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/22a1f13b90b0,45429,1731361431028
2024-11-11T21:43:51,908 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41845-0x10030be36e10001, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-11T21:43:51,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-11T21:43:51,910 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/22a1f13b90b0,45429,1731361431028
2024-11-11T21:43:51,938 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41845-0x10030be36e10001, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-11T21:43:51,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-11T21:43:51,939 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41845-0x10030be36e10001, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-11T21:43:51,939 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-11T21:43:51,941 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/22a1f13b90b0,45429,1731361431028 from backup master directory
2024-11-11T21:43:51,943 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41845-0x10030be36e10001, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-11T21:43:51,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/22a1f13b90b0,45429,1731361431028
2024-11-11T21:43:51,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-11T21:43:51,944 WARN [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-11T21:43:51,944 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=22a1f13b90b0,45429,1731361431028
2024-11-11T21:43:51,947 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-11-11T21:43:51,949 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-11-11T21:43:52,009 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/hbase.id] with ID: 5cc19a2b-f255-4dab-885b-e4b2cb3e4279
2024-11-11T21:43:52,009 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/.tmp/hbase.id
2024-11-11T21:43:52,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741826_1002 (size=42)
2024-11-11T21:43:52,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741826_1002 (size=42)
2024-11-11T21:43:52,025 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/.tmp/hbase.id]:[hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/hbase.id]
2024-11-11T21:43:52,074 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T21:43:52,080 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-11T21:43:52,100 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms.
2024-11-11T21:43:52,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-11T21:43:52,104 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41845-0x10030be36e10001, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-11T21:43:52,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741827_1003 (size=196)
2024-11-11T21:43:52,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741827_1003 (size=196)
2024-11-11T21:43:52,140 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-11T21:43:52,142 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-11T21:43:52,149 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-11T21:43:52,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741828_1004 (size=1189)
2024-11-11T21:43:52,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741828_1004 (size=1189)
2024-11-11T21:43:52,220 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store
2024-11-11T21:43:52,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741829_1005 (size=34)
2024-11-11T21:43:52,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741829_1005 (size=34)
2024-11-11T21:43:52,251 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-11-11T21:43:52,254 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-11T21:43:52,256 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-11T21:43:52,256 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-11T21:43:52,256 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-11T21:43:52,258 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-11T21:43:52,258 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-11T21:43:52,258 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-11T21:43:52,259 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731361432256Disabling compacts and flushes for region at 1731361432256Disabling writes for close at 1731361432258 (+2 ms)Writing region close event to WAL at 1731361432258Closed at 1731361432258 2024-11-11T21:43:52,262 WARN [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/.initializing 2024-11-11T21:43:52,263 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/WALs/22a1f13b90b0,45429,1731361431028 2024-11-11T21:43:52,302 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C45429%2C1731361431028, suffix=, logDir=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/WALs/22a1f13b90b0,45429,1731361431028, archiveDir=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/oldWALs, maxLogs=10 2024-11-11T21:43:52,313 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C45429%2C1731361431028.1731361432308 2024-11-11T21:43:52,338 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/WALs/22a1f13b90b0,45429,1731361431028/22a1f13b90b0%2C45429%2C1731361431028.1731361432308 2024-11-11T21:43:52,353 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43441:43441),(127.0.0.1/127.0.0.1:45669:45669)] 2024-11-11T21:43:52,358 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T21:43:52,358 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:43:52,363 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:43:52,364 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:43:52,410 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:43:52,436 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T21:43:52,440 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:43:52,443 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:43:52,444 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:43:52,447 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T21:43:52,448 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:43:52,449 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:43:52,449 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:43:52,453 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T21:43:52,454 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:43:52,455 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:43:52,455 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:43:52,458 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T21:43:52,458 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:43:52,459 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:43:52,460 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:43:52,464 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:43:52,465 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:43:52,472 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:43:52,472 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:43:52,478 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T21:43:52,484 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:43:52,495 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T21:43:52,497 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=699636, jitterRate=-0.11036783456802368}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T21:43:52,511 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731361432382Initializing all the Stores at 1731361432385 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361432385Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361432386 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361432386Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361432387 (+1 ms)Cleaning up temporary data from old regions at 1731361432473 (+86 ms)Region opened successfully at 1731361432511 (+38 ms) 2024-11-11T21:43:52,513 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T21:43:52,553 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9ec07a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=22a1f13b90b0/172.17.0.2:0 2024-11-11T21:43:52,586 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T21:43:52,596 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T21:43:52,597 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T21:43:52,599 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T21:43:52,601 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-11T21:43:52,607 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-11T21:43:52,607 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T21:43:52,636 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T21:43:52,646 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T21:43:52,648 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T21:43:52,651 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T21:43:52,653 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T21:43:52,654 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T21:43:52,658 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T21:43:52,661 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T21:43:52,663 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T21:43:52,664 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T21:43:52,666 
DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T21:43:52,683 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T21:43:52,685 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T21:43:52,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T21:43:52,689 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41845-0x10030be36e10001, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T21:43:52,690 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41845-0x10030be36e10001, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:43:52,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:43:52,694 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=22a1f13b90b0,45429,1731361431028, sessionid=0x10030be36e10000, setting cluster-up flag (Was=false) 2024-11-11T21:43:52,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:43:52,707 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41845-0x10030be36e10001, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:43:52,712 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T21:43:52,714 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=22a1f13b90b0,45429,1731361431028 2024-11-11T21:43:52,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:43:52,719 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41845-0x10030be36e10001, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:43:52,722 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T21:43:52,724 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=22a1f13b90b0,45429,1731361431028 2024-11-11T21:43:52,730 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T21:43:52,786 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.HRegionServer(746): ClusterId : 5cc19a2b-f255-4dab-885b-e4b2cb3e4279 2024-11-11T21:43:52,789 DEBUG [RS:0;22a1f13b90b0:41845 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T21:43:52,795 DEBUG [RS:0;22a1f13b90b0:41845 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T21:43:52,795 DEBUG [RS:0;22a1f13b90b0:41845 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T21:43:52,798 DEBUG [RS:0;22a1f13b90b0:41845 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T21:43:52,798 DEBUG [RS:0;22a1f13b90b0:41845 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@245e260, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=22a1f13b90b0/172.17.0.2:0 2024-11-11T21:43:52,813 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T21:43:52,816 DEBUG [RS:0;22a1f13b90b0:41845 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;22a1f13b90b0:41845 2024-11-11T21:43:52,820 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T21:43:52,820 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T21:43:52,821 DEBUG [RS:0;22a1f13b90b0:41845 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-11T21:43:52,823 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.HRegionServer(2659): reportForDuty to master=22a1f13b90b0,45429,1731361431028 with port=41845, startcode=1731361431777 2024-11-11T21:43:52,823 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T21:43:52,831 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
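[Annotation] The StochasticLoadBalancer entry above (maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000, plus the cost-function list) reflects configuration read at master startup. The sketch below shows how such values could be overridden programmatically; the hbase.master.balancer.stochastic.* key names are the commonly documented ones and are an assumption here, so verify them against the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static Configuration tuned() {
            Configuration conf = HBaseConfiguration.create();
            // Key names assumed from the usual StochasticLoadBalancer documentation;
            // values mirror what the log shows being loaded.
            conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
            conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
            return conf;
        }
    }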
2024-11-11T21:43:52,838 DEBUG [RS:0;22a1f13b90b0:41845 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T21:43:52,837 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 22a1f13b90b0,45429,1731361431028 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T21:43:52,845 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:43:52,845 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:43:52,846 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:43:52,846 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:43:52,846 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/22a1f13b90b0:0, corePoolSize=10, maxPoolSize=10 2024-11-11T21:43:52,846 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:43:52,846 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=2, maxPoolSize=2 2024-11-11T21:43:52,846 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:43:52,850 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731361462850 2024-11-11T21:43:52,852 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T21:43:52,853 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:43:52,853 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T21:43:52,853 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T21:43:52,857 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T21:43:52,857 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T21:43:52,857 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T21:43:52,858 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T21:43:52,858 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T21:43:52,860 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:43:52,861 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T21:43:52,865 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T21:43:52,868 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T21:43:52,869 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T21:43:52,878 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T21:43:52,878 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T21:43:52,883 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361432880,5,FailOnTimeoutGroup] 
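[Annotation] The LogsCleaner and HFileCleaner chores initialized above run pluggable cleaner delegates, and the classes they load are exactly the ones named in the log. A hedged sketch of how such a chain is normally declared follows; the two property keys (hbase.master.logcleaner.plugins and hbase.master.hfilecleaner.plugins) are the standard ones and are assumed rather than taken from this test's configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerChainSketch {
        public static Configuration withCleaners() {
            Configuration conf = HBaseConfiguration.create();
            // WAL cleaners, as listed in the log.
            conf.set("hbase.master.logcleaner.plugins",
                "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
              + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
            // HFile cleaners, as listed in the log.
            conf.set("hbase.master.hfilecleaner.plugins",
                "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner,"
              + "org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner,"
              + "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
            return conf;
        }
    }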
2024-11-11T21:43:52,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741831_1007 (size=1321) 2024-11-11T21:43:52,887 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361432884,5,FailOnTimeoutGroup] 2024-11-11T21:43:52,887 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T21:43:52,887 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T21:43:52,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741831_1007 (size=1321) 2024-11-11T21:43:52,888 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T21:43:52,889 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-11T21:43:52,889 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T21:43:52,890 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0 2024-11-11T21:43:52,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39997 is added to blk_1073741832_1008 (size=32) 2024-11-11T21:43:52,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741832_1008 (size=32) 2024-11-11T21:43:52,913 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:43:52,917 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T21:43:52,921 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T21:43:52,922 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:43:52,923 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:43:52,924 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T21:43:52,928 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45983, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T21:43:52,928 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T21:43:52,929 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:43:52,930 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:43:52,930 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T21:43:52,934 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T21:43:52,934 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:43:52,936 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:43:52,936 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T21:43:52,936 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45429 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 22a1f13b90b0,41845,1731361431777 2024-11-11T21:43:52,939 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45429 {}] master.ServerManager(517): Registering regionserver=22a1f13b90b0,41845,1731361431777 2024-11-11T21:43:52,940 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T21:43:52,940 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:43:52,942 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:43:52,943 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T21:43:52,945 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740 2024-11-11T21:43:52,946 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740 2024-11-11T21:43:52,950 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T21:43:52,950 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T21:43:52,951 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T21:43:52,955 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T21:43:52,958 DEBUG [RS:0;22a1f13b90b0:41845 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0 2024-11-11T21:43:52,958 DEBUG [RS:0;22a1f13b90b0:41845 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42807 2024-11-11T21:43:52,959 DEBUG [RS:0;22a1f13b90b0:41845 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T21:43:52,961 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T21:43:52,962 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=694125, jitterRate=-0.11737556755542755}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T21:43:52,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T21:43:52,964 DEBUG [RS:0;22a1f13b90b0:41845 {}] zookeeper.ZKUtil(111): regionserver:41845-0x10030be36e10001, quorum=127.0.0.1:50357, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/22a1f13b90b0,41845,1731361431777 2024-11-11T21:43:52,965 WARN [RS:0;22a1f13b90b0:41845 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
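[Annotation] The "Opened 1588230740" entry above records the split policy chain (SteppingSplitPolicy over IncreasingToUpperBoundRegionSplitPolicy over ConstantSizeRegionSplitPolicy) and the FlushLargeStoresPolicy lower bound chosen for hbase:meta. For ordinary tables these knobs can be set per table descriptor; a minimal sketch follows, where the table name 'demo' and family 'cf' are hypothetical and the small test-sized thresholds in the log are replaced by illustrative production-style values.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SplitPolicySketch {
        // 'demo' and 'cf' are hypothetical names used only for this sketch.
        public static TableDescriptor build() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                .setRegionSplitPolicyClassName(
                    "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy")
                .setMaxFileSize(10L * 1024 * 1024 * 1024)   // split threshold: 10 GB
                .setMemStoreFlushSize(128L * 1024 * 1024)   // per-region flush size: 128 MB
                .build();
        }
    }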
2024-11-11T21:43:52,965 INFO [RS:0;22a1f13b90b0:41845 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:43:52,965 DEBUG [RS:0;22a1f13b90b0:41845 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777 2024-11-11T21:43:52,967 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731361432913Initializing all the Stores at 1731361432915 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361432915Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361432917 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361432917Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361432917Cleaning up temporary data from old regions at 1731361432950 (+33 ms)Region opened successfully at 1731361432967 (+17 ms) 2024-11-11T21:43:52,967 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T21:43:52,968 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T21:43:52,968 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T21:43:52,968 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [22a1f13b90b0,41845,1731361431777] 2024-11-11T21:43:52,968 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T21:43:52,968 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T21:43:52,972 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T21:43:52,973 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731361432967Disabling compacts and flushes for region at 1731361432967Disabling writes for close at 1731361432968 (+1 ms)Writing region close event to WAL at 1731361432972 (+4 ms)Closed at 1731361432972 2024-11-11T21:43:52,977 DEBUG [PEWorker-1 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:43:52,978 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T21:43:52,986 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T21:43:52,998 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T21:43:53,001 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T21:43:53,008 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T21:43:53,027 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T21:43:53,035 INFO [RS:0;22a1f13b90b0:41845 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T21:43:53,036 INFO [RS:0;22a1f13b90b0:41845 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:43:53,037 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T21:43:53,045 INFO [RS:0;22a1f13b90b0:41845 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T21:43:53,046 INFO [RS:0;22a1f13b90b0:41845 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
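[Annotation] The MemStoreFlusher line above (globalMemStoreLimit=880 M, low mark 836 M) and the PressureAwareCompactionThroughputController bounds (100 MB/s upper, 50 MB/s lower) are both derived from configuration plus the test JVM heap. A hedged sketch of the corresponding overrides follows; the key names are taken from common HBase documentation and the values are illustrative, not the ones used by this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionServerMemoryTuningSketch {
        public static Configuration tuned() {
            Configuration conf = HBaseConfiguration.create();
            // Fraction of heap shared by all memstores, and the low-water mark
            // relative to that limit; the 880 M / 836 M figures in the log come
            // from settings like these applied to the test heap.
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
            conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
            // Compaction throughput bounds in bytes/second (assumed key names).
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            return conf;
        }
    }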
2024-11-11T21:43:53,047 DEBUG [RS:0;22a1f13b90b0:41845 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:43:53,047 DEBUG [RS:0;22a1f13b90b0:41845 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:43:53,047 DEBUG [RS:0;22a1f13b90b0:41845 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:43:53,047 DEBUG [RS:0;22a1f13b90b0:41845 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:43:53,047 DEBUG [RS:0;22a1f13b90b0:41845 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:43:53,047 DEBUG [RS:0;22a1f13b90b0:41845 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/22a1f13b90b0:0, corePoolSize=2, maxPoolSize=2 2024-11-11T21:43:53,047 DEBUG [RS:0;22a1f13b90b0:41845 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:43:53,048 DEBUG [RS:0;22a1f13b90b0:41845 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:43:53,048 DEBUG [RS:0;22a1f13b90b0:41845 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:43:53,048 DEBUG [RS:0;22a1f13b90b0:41845 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:43:53,048 DEBUG [RS:0;22a1f13b90b0:41845 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:43:53,048 DEBUG [RS:0;22a1f13b90b0:41845 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:43:53,048 DEBUG [RS:0;22a1f13b90b0:41845 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/22a1f13b90b0:0, corePoolSize=3, maxPoolSize=3 2024-11-11T21:43:53,048 DEBUG [RS:0;22a1f13b90b0:41845 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0, corePoolSize=3, maxPoolSize=3 2024-11-11T21:43:53,049 INFO [RS:0;22a1f13b90b0:41845 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T21:43:53,049 INFO [RS:0;22a1f13b90b0:41845 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T21:43:53,050 INFO [RS:0;22a1f13b90b0:41845 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:43:53,050 INFO [RS:0;22a1f13b90b0:41845 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
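[Annotation] The many "Chore ScheduledChore name=... is enabled" entries above and below all follow the same pattern: a ScheduledChore subclass registered with the process-wide ChoreService. A minimal sketch of that pattern, using only the public ScheduledChore/ChoreService/Stoppable types; the chore name and period are invented for the example.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
        public static void main(String[] args) {
            // Minimal Stoppable so the chore can observe shutdown.
            Stoppable stopper = new Stoppable() {
                private volatile boolean stopped;
                @Override public void stop(String why) { stopped = true; }
                @Override public boolean isStopped() { return stopped; }
            };
            ChoreService service = new ChoreService("sketch");
            // Runs chore() roughly every 1000 ms until stopped or cancelled.
            ScheduledChore chore = new ScheduledChore("DemoChore", stopper, 1000) {
                @Override protected void chore() {
                    // periodic work would go here
                }
            };
            service.scheduleChore(chore);
            // ... later, during shutdown:
            stopper.stop("done");
            service.shutdown();
        }
    }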
2024-11-11T21:43:53,050 INFO [RS:0;22a1f13b90b0:41845 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T21:43:53,050 INFO [RS:0;22a1f13b90b0:41845 {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,41845,1731361431777-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T21:43:53,070 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T21:43:53,072 INFO [RS:0;22a1f13b90b0:41845 {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,41845,1731361431777-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:43:53,072 INFO [RS:0;22a1f13b90b0:41845 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:43:53,072 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.Replication(171): 22a1f13b90b0,41845,1731361431777 started 2024-11-11T21:43:53,097 INFO [RS:0;22a1f13b90b0:41845 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:43:53,098 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.HRegionServer(1482): Serving as 22a1f13b90b0,41845,1731361431777, RpcServer on 22a1f13b90b0/172.17.0.2:41845, sessionid=0x10030be36e10001 2024-11-11T21:43:53,099 DEBUG [RS:0;22a1f13b90b0:41845 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T21:43:53,099 DEBUG [RS:0;22a1f13b90b0:41845 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 22a1f13b90b0,41845,1731361431777 2024-11-11T21:43:53,100 DEBUG [RS:0;22a1f13b90b0:41845 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '22a1f13b90b0,41845,1731361431777' 2024-11-11T21:43:53,100 DEBUG [RS:0;22a1f13b90b0:41845 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T21:43:53,101 DEBUG [RS:0;22a1f13b90b0:41845 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T21:43:53,102 DEBUG [RS:0;22a1f13b90b0:41845 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T21:43:53,102 DEBUG [RS:0;22a1f13b90b0:41845 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T21:43:53,103 DEBUG [RS:0;22a1f13b90b0:41845 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 22a1f13b90b0,41845,1731361431777 2024-11-11T21:43:53,103 DEBUG [RS:0;22a1f13b90b0:41845 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '22a1f13b90b0,41845,1731361431777' 2024-11-11T21:43:53,103 DEBUG [RS:0;22a1f13b90b0:41845 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T21:43:53,104 DEBUG [RS:0;22a1f13b90b0:41845 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T21:43:53,104 DEBUG [RS:0;22a1f13b90b0:41845 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T21:43:53,104 INFO [RS:0;22a1f13b90b0:41845 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T21:43:53,104 INFO [RS:0;22a1f13b90b0:41845 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-11T21:43:53,152 WARN [22a1f13b90b0:45429 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-11T21:43:53,214 INFO [RS:0;22a1f13b90b0:41845 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C41845%2C1731361431777, suffix=, logDir=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777, archiveDir=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/oldWALs, maxLogs=32 2024-11-11T21:43:53,218 INFO [RS:0;22a1f13b90b0:41845 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C41845%2C1731361431777.1731361433217 2024-11-11T21:43:53,230 INFO [RS:0;22a1f13b90b0:41845 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361433217 2024-11-11T21:43:53,237 DEBUG [RS:0;22a1f13b90b0:41845 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45669:45669),(127.0.0.1/127.0.0.1:43441:43441)] 2024-11-11T21:43:53,404 DEBUG [22a1f13b90b0:45429 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T21:43:53,416 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=22a1f13b90b0,41845,1731361431777 2024-11-11T21:43:53,422 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 22a1f13b90b0,41845,1731361431777, state=OPENING 2024-11-11T21:43:53,426 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T21:43:53,428 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41845-0x10030be36e10001, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:43:53,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:43:53,429 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:43:53,429 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:43:53,430 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T21:43:53,432 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=22a1f13b90b0,41845,1731361431777}] 2024-11-11T21:43:53,610 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T21:43:53,614 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49197, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T21:43:53,628 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T21:43:53,629 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:43:53,633 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C41845%2C1731361431777.meta, suffix=.meta, logDir=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777, archiveDir=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/oldWALs, maxLogs=32 2024-11-11T21:43:53,636 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C41845%2C1731361431777.meta.1731361433636.meta 2024-11-11T21:43:53,654 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.meta.1731361433636.meta 2024-11-11T21:43:53,656 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45669:45669),(127.0.0.1/127.0.0.1:43441:43441)] 2024-11-11T21:43:53,657 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T21:43:53,660 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T21:43:53,663 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T21:43:53,668 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
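The entries above show the MultiRowMutationEndpoint coprocessor being picked up from the table descriptor of hbase:meta while the region opens. For reference only, a minimal sketch of how such an endpoint is normally declared on a descriptor through the public client API; the table name "demo" and family "info" are placeholders, not values from this log:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorTableSketch {
      // Builds a descriptor that loads the same endpoint class the log reports
      // for hbase:meta; the table and family names are made up for the example.
      public static TableDescriptor build() throws IOException {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
      }
    }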
2024-11-11T21:43:53,672 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T21:43:53,673 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:43:53,673 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T21:43:53,674 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T21:43:53,678 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T21:43:53,680 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T21:43:53,680 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:43:53,681 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:43:53,681 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T21:43:53,683 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T21:43:53,684 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:43:53,685 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:43:53,685 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T21:43:53,687 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T21:43:53,687 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:43:53,688 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:43:53,688 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T21:43:53,690 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T21:43:53,690 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:43:53,692 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
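The StoreOpener entries above report each hbase:meta column family being created with ROW_INDEX_V1 block encoding, no compression, and the DefaultMemStore. As an illustration only, roughly how a column family with those two storage settings would be declared via the client API; the family name and every other builder default are assumptions, not taken from this log:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeFamilySketch {
      // Mirrors the encoding/compression the store-open entries above report;
      // everything else is left at the builder defaults.
      public static ColumnFamilyDescriptor infoFamily() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setCompressionType(Compression.Algorithm.NONE)
            .build();
      }
    }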
2024-11-11T21:43:53,692 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T21:43:53,694 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740 2024-11-11T21:43:53,699 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740 2024-11-11T21:43:53,701 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T21:43:53,702 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T21:43:53,703 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T21:43:53,707 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T21:43:53,709 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=705272, jitterRate=-0.10320146381855011}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T21:43:53,710 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T21:43:53,712 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731361433674Writing region info on filesystem at 1731361433675 (+1 ms)Initializing all the Stores at 1731361433677 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361433677Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361433678 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361433678Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361433678Cleaning up temporary data from old regions at 1731361433702 (+24 ms)Running coprocessor post-open hooks at 1731361433710 (+8 ms)Region opened successfully at 1731361433711 (+1 ms) 2024-11-11T21:43:53,719 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731361433598 2024-11-11T21:43:53,732 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T21:43:53,732 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T21:43:53,734 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=22a1f13b90b0,41845,1731361431777 2024-11-11T21:43:53,737 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 22a1f13b90b0,41845,1731361431777, state=OPEN 2024-11-11T21:43:53,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T21:43:53,741 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41845-0x10030be36e10001, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T21:43:53,741 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:43:53,741 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:43:53,742 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=22a1f13b90b0,41845,1731361431777 2024-11-11T21:43:53,747 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T21:43:53,747 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=22a1f13b90b0,41845,1731361431777 in 310 msec 2024-11-11T21:43:53,754 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T21:43:53,754 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 762 msec 2024-11-11T21:43:53,756 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:43:53,756 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T21:43:53,776 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T21:43:53,778 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=22a1f13b90b0,41845,1731361431777, seqNum=-1] 2024-11-11T21:43:53,805 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T21:43:53,807 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51259, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T21:43:53,831 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0610 sec 2024-11-11T21:43:53,832 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731361433831, completionTime=-1 2024-11-11T21:43:53,834 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T21:43:53,834 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-11T21:43:53,860 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T21:43:53,861 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731361493860 2024-11-11T21:43:53,861 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731361553861 2024-11-11T21:43:53,861 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 26 msec 2024-11-11T21:43:53,864 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45429,1731361431028-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:43:53,865 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45429,1731361431028-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:43:53,865 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45429,1731361431028-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:43:53,867 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-22a1f13b90b0:45429, period=300000, unit=MILLISECONDS is enabled. 
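InitMetaProcedure above creates the 'default' and 'hbase' namespaces as part of master startup. For comparison, a user namespace would be created from the client side roughly like this; the namespace name "example_ns" is a placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // The built-in 'default' and 'hbase' namespaces are created by the
          // master itself; this is the equivalent client-side call.
          admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
        }
      }
    }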
2024-11-11T21:43:53,868 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T21:43:53,868 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T21:43:53,874 DEBUG [master/22a1f13b90b0:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T21:43:53,898 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.953sec 2024-11-11T21:43:53,900 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T21:43:53,902 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T21:43:53,903 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T21:43:53,903 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T21:43:53,903 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T21:43:53,904 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45429,1731361431028-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T21:43:53,905 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45429,1731361431028-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T21:43:53,912 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T21:43:53,913 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T21:43:53,914 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45429,1731361431028-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T21:43:53,997 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48e858bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:43:54,000 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-11T21:43:54,000 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-11T21:43:54,004 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 22a1f13b90b0,45429,-1 for getting cluster id 2024-11-11T21:43:54,008 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T21:43:54,018 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5cc19a2b-f255-4dab-885b-e4b2cb3e4279' 2024-11-11T21:43:54,021 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T21:43:54,021 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5cc19a2b-f255-4dab-885b-e4b2cb3e4279" 2024-11-11T21:43:54,025 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@100cd7e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:43:54,025 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [22a1f13b90b0,45429,-1] 2024-11-11T21:43:54,028 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T21:43:54,031 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:43:54,033 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49770, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T21:43:54,037 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b20e2b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:43:54,037 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T21:43:54,049 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=22a1f13b90b0,41845,1731361431777, seqNum=-1] 2024-11-11T21:43:54,049 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T21:43:54,052 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36738, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T21:43:54,090 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=22a1f13b90b0,45429,1731361431028 2024-11-11T21:43:54,090 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:43:54,098 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T21:43:54,103 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T21:43:54,110 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 22a1f13b90b0,45429,1731361431028 2024-11-11T21:43:54,116 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@549cc41e 2024-11-11T21:43:54,117 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T21:43:54,124 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49776, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T21:43:54,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45429 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-11T21:43:54,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45429 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
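Once the minicluster is up, the test disables the balancer ("set balanceSwitch=false" above) before creating its table. A minimal sketch of the corresponding client call, assuming a standard Connection built from the cluster configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class BalancerSwitchSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Turn the balancer off synchronously; the return value is the
          // previous state, as reported by the master.
          boolean wasOn = admin.balancerSwitch(false, true);
          System.out.println("balancer previously on: " + wasOn);
        }
      }
    }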
2024-11-11T21:43:54,133 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45429 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T21:43:54,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45429 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-11T21:43:54,143 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T21:43:54,145 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45429 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-11T21:43:54,146 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:43:54,149 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T21:43:54,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T21:43:54,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741835_1011 (size=389) 2024-11-11T21:43:54,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741835_1011 (size=389) 2024-11-11T21:43:54,199 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1390f962e52fc2f0ff7949a317fc2221, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0 2024-11-11T21:43:54,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741836_1012 (size=72) 2024-11-11T21:43:54,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741836_1012 (size=72) 2024-11-11T21:43:54,212 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:43:54,213 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 1390f962e52fc2f0ff7949a317fc2221, disabling compactions & flushes 2024-11-11T21:43:54,213 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221. 2024-11-11T21:43:54,213 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221. 2024-11-11T21:43:54,213 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221. after waiting 0 ms 2024-11-11T21:43:54,213 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221. 2024-11-11T21:43:54,213 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221. 2024-11-11T21:43:54,213 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1390f962e52fc2f0ff7949a317fc2221: Waiting for close lock at 1731361434213Disabling compacts and flushes for region at 1731361434213Disabling writes for close at 1731361434213Writing region close event to WAL at 1731361434213Closed at 1731361434213 2024-11-11T21:43:54,216 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T21:43:54,222 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731361434216"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731361434216"}]},"ts":"1731361434216"} 2024-11-11T21:43:54,228 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-11T21:43:54,231 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T21:43:54,234 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731361434231"}]},"ts":"1731361434231"} 2024-11-11T21:43:54,239 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-11T21:43:54,241 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1390f962e52fc2f0ff7949a317fc2221, ASSIGN}] 2024-11-11T21:43:54,245 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1390f962e52fc2f0ff7949a317fc2221, ASSIGN 2024-11-11T21:43:54,247 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1390f962e52fc2f0ff7949a317fc2221, ASSIGN; state=OFFLINE, location=22a1f13b90b0,41845,1731361431777; forceNewPlan=false, retain=false 2024-11-11T21:43:54,398 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1390f962e52fc2f0ff7949a317fc2221, regionState=OPENING, regionLocation=22a1f13b90b0,41845,1731361431777 2024-11-11T21:43:54,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1390f962e52fc2f0ff7949a317fc2221, ASSIGN because future has completed 2024-11-11T21:43:54,404 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1390f962e52fc2f0ff7949a317fc2221, server=22a1f13b90b0,41845,1731361431777}] 2024-11-11T21:43:54,567 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221. 
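The CreateTableProcedure entries above correspond to the test creating TestLogRolling-testSlowSyncLogRolling with a single 'info' family (VERSIONS => '1'), and the earlier TableDescriptorChecker warnings point at very small region and flush limits (786432 and 8192). A hedged sketch of an equivalent client-side creation; in the test those two limits may well come from the configuration rather than the descriptor, so setting them as descriptor attributes here is an assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)           // VERSIONS => '1' in the log
                  .build())
              .setMaxFileSize(786432L)         // value the checker warns about
              .setMemStoreFlushSize(8192L)     // value the checker warns about
              .build();
          admin.createTable(td);
        }
      }
    }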
2024-11-11T21:43:54,568 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 1390f962e52fc2f0ff7949a317fc2221, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221.', STARTKEY => '', ENDKEY => ''} 2024-11-11T21:43:54,569 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 1390f962e52fc2f0ff7949a317fc2221 2024-11-11T21:43:54,571 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:43:54,571 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 1390f962e52fc2f0ff7949a317fc2221 2024-11-11T21:43:54,571 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 1390f962e52fc2f0ff7949a317fc2221 2024-11-11T21:43:54,581 INFO [StoreOpener-1390f962e52fc2f0ff7949a317fc2221-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1390f962e52fc2f0ff7949a317fc2221 2024-11-11T21:43:54,585 INFO [StoreOpener-1390f962e52fc2f0ff7949a317fc2221-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1390f962e52fc2f0ff7949a317fc2221 columnFamilyName info 2024-11-11T21:43:54,585 DEBUG [StoreOpener-1390f962e52fc2f0ff7949a317fc2221-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:43:54,588 INFO [StoreOpener-1390f962e52fc2f0ff7949a317fc2221-1 {}] regionserver.HStore(327): Store=1390f962e52fc2f0ff7949a317fc2221/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:43:54,588 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 1390f962e52fc2f0ff7949a317fc2221 2024-11-11T21:43:54,590 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221 2024-11-11T21:43:54,591 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221 2024-11-11T21:43:54,592 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 1390f962e52fc2f0ff7949a317fc2221 2024-11-11T21:43:54,592 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 1390f962e52fc2f0ff7949a317fc2221 2024-11-11T21:43:54,597 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 1390f962e52fc2f0ff7949a317fc2221 2024-11-11T21:43:54,602 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T21:43:54,604 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 1390f962e52fc2f0ff7949a317fc2221; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=866010, jitterRate=0.10118930041790009}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T21:43:54,604 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1390f962e52fc2f0ff7949a317fc2221 2024-11-11T21:43:54,605 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 1390f962e52fc2f0ff7949a317fc2221: Running coprocessor pre-open hook at 1731361434572Writing region info on filesystem at 1731361434572Initializing all the Stores at 1731361434575 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361434575Cleaning up temporary data from old regions at 1731361434592 (+17 ms)Running coprocessor post-open hooks at 1731361434604 (+12 ms)Region opened successfully at 1731361434605 (+1 ms) 2024-11-11T21:43:54,607 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221., pid=6, masterSystemTime=1731361434559 2024-11-11T21:43:54,612 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221. 2024-11-11T21:43:54,612 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221. 2024-11-11T21:43:54,613 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1390f962e52fc2f0ff7949a317fc2221, regionState=OPEN, openSeqNum=2, regionLocation=22a1f13b90b0,41845,1731361431777 2024-11-11T21:43:54,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1390f962e52fc2f0ff7949a317fc2221, server=22a1f13b90b0,41845,1731361431777 because future has completed 2024-11-11T21:43:54,631 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T21:43:54,631 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 1390f962e52fc2f0ff7949a317fc2221, server=22a1f13b90b0,41845,1731361431777 in 219 msec 2024-11-11T21:43:54,638 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T21:43:54,638 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1390f962e52fc2f0ff7949a317fc2221, ASSIGN in 390 msec 2024-11-11T21:43:54,641 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T21:43:54,642 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731361434642"}]},"ts":"1731361434642"} 2024-11-11T21:43:54,646 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-11T21:43:54,649 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T21:43:54,653 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 513 msec 2024-11-11T21:43:59,181 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-11T21:43:59,235 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T21:43:59,237 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-11T21:44:01,464 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T21:44:01,465 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-11T21:44:01,467 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-11T21:44:01,467 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-11T21:44:01,468 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T21:44:01,468 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-11T21:44:01,469 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-11T21:44:01,469 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-11T21:44:04,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45429 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T21:44:04,185 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-11T21:44:04,189 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-11T21:44:04,196 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-11T21:44:04,196 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221. 
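The next segment shows the region server's WAL being rolled to a new writer (22a1f13b90b0%2C41845%2C1731361431777.1731361444197 below). An explicit roll can also be requested through the Admin API; a minimal sketch, using the ServerName string (host,port,startcode) that appears throughout this log, and not claiming this is how the test itself triggers the roll:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WalRollSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Parsed from the "host,port,startcode" form used in the log.
          ServerName rs = ServerName.valueOf("22a1f13b90b0,41845,1731361431777");
          admin.rollWALWriter(rs);
        }
      }
    }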
2024-11-11T21:44:04,197 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C41845%2C1731361431777.1731361444197 2024-11-11T21:44:04,206 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:44:04,206 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:44:04,206 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:44:04,207 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:44:04,207 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:44:04,207 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361433217 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361444197 2024-11-11T21:44:04,210 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43441:43441),(127.0.0.1/127.0.0.1:45669:45669)] 2024-11-11T21:44:04,210 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361433217 is not closed yet, will try archiving it next time 2024-11-11T21:44:04,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741833_1009 (size=451) 2024-11-11T21:44:04,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741833_1009 (size=451) 2024-11-11T21:44:04,212 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361433217 to hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/oldWALs/22a1f13b90b0%2C41845%2C1731361431777.1731361433217 2024-11-11T21:44:04,219 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221., hostname=22a1f13b90b0,41845,1731361431777, seqNum=2] 2024-11-11T21:44:16,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41845 {}] regionserver.HRegion(8855): Flush requested on 1390f962e52fc2f0ff7949a317fc2221 2024-11-11T21:44:16,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1390f962e52fc2f0ff7949a317fc2221 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T21:44:16,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/.tmp/info/3617734395084bfaaf4f8f1cbd97b561 is 1080, key is row0001/info:/1731361444222/Put/seqid=0 2024-11-11T21:44:16,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741838_1014 (size=12509) 2024-11-11T21:44:16,344 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741838_1014 (size=12509) 2024-11-11T21:44:16,345 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/.tmp/info/3617734395084bfaaf4f8f1cbd97b561 2024-11-11T21:44:16,398 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/.tmp/info/3617734395084bfaaf4f8f1cbd97b561 as hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/3617734395084bfaaf4f8f1cbd97b561 2024-11-11T21:44:16,412 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/3617734395084bfaaf4f8f1cbd97b561, entries=7, sequenceid=11, filesize=12.2 K 2024-11-11T21:44:16,420 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1390f962e52fc2f0ff7949a317fc2221 in 157ms, sequenceid=11, compaction requested=false 2024-11-11T21:44:16,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1390f962e52fc2f0ff7949a317fc2221: 2024-11-11T21:44:20,072 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
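The flush entries above follow a two-step pattern: the memstore snapshot is first written to a file under the region's .tmp directory, and only the finished file is then committed into the info store directory, so readers never observe a partially written store file. The sketch below shows that tmp-then-commit idea with plain java.nio.file; it is a stand-in for illustration, not HBase's HRegionFileSystem or HFile writer, and the paths and payload are made up to echo the names in this log.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.*;

    // Sketch of the "flush to .tmp, then commit" sequence logged above.
    public class TmpThenCommitFlush {
        static Path flush(Path regionDir, String family, String fileName, byte[] data) throws IOException {
            Path tmpDir = regionDir.resolve(".tmp").resolve(family);
            Path familyDir = regionDir.resolve(family);
            Files.createDirectories(tmpDir);
            Files.createDirectories(familyDir);

            // 1) Write the full contents to a temporary location first.
            Path tmpFile = tmpDir.resolve(fileName);
            Files.write(tmpFile, data);

            // 2) Commit: atomically move the finished file into the store directory.
            //    This corresponds to the "Committing ... as ..." DEBUG line above.
            Path committed = familyDir.resolve(fileName);
            return Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
        }

        public static void main(String[] args) throws IOException {
            Path regionDir = Files.createTempDirectory("region-sketch");
            Path storeFile = flush(regionDir, "info", "3617734395084bfaaf4f8f1cbd97b561",
                    "row0001/info:/1731361444222".getBytes(StandardCharsets.UTF_8));
            System.out.println("Committed store file: " + storeFile);
        }
    }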
2024-11-11T21:44:24,280 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C41845%2C1731361431777.1731361464279 2024-11-11T21:44:24,493 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 208 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK], DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK]] 2024-11-11T21:44:24,494 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:44:24,494 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:44:24,494 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:44:24,494 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:44:24,494 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:44:24,495 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361444197 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361464279 2024-11-11T21:44:24,495 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45669:45669),(127.0.0.1/127.0.0.1:43441:43441)] 2024-11-11T21:44:24,496 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361444197 is not closed yet, will try archiving it next time 2024-11-11T21:44:24,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741837_1013 (size=12399) 2024-11-11T21:44:24,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741837_1013 (size=12399) 2024-11-11T21:44:24,699 INFO [FSHLog-0-hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0-prefix:22a1f13b90b0,41845,1731361431777 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK], DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK]] 2024-11-11T21:44:26,904 INFO [FSHLog-0-hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0-prefix:22a1f13b90b0,41845,1731361431777 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK], DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK]] 2024-11-11T21:44:29,111 INFO [FSHLog-0-hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0-prefix:22a1f13b90b0,41845,1731361431777 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK], DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK]] 2024-11-11T21:44:31,319 INFO [FSHLog-0-hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0-prefix:22a1f13b90b0,41845,1731361431777 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK], DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK]] 2024-11-11T21:44:31,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41845 {}] regionserver.HRegion(8855): Flush requested on 1390f962e52fc2f0ff7949a317fc2221 2024-11-11T21:44:31,320 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1390f962e52fc2f0ff7949a317fc2221 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T21:44:31,523 INFO [FSHLog-0-hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0-prefix:22a1f13b90b0,41845,1731361431777 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK], DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK]] 2024-11-11T21:44:31,531 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/.tmp/info/3eba083c0a454d8689ecea3e2ce119d6 is 1080, key is row0008/info:/1731361458262/Put/seqid=0 2024-11-11T21:44:31,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741840_1016 (size=12509) 2024-11-11T21:44:31,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741840_1016 (size=12509) 2024-11-11T21:44:31,540 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/.tmp/info/3eba083c0a454d8689ecea3e2ce119d6 2024-11-11T21:44:31,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/.tmp/info/3eba083c0a454d8689ecea3e2ce119d6 as hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/3eba083c0a454d8689ecea3e2ce119d6 2024-11-11T21:44:31,560 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/3eba083c0a454d8689ecea3e2ce119d6, entries=7, sequenceid=21, filesize=12.2 K 2024-11-11T21:44:31,764 INFO [FSHLog-0-hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0-prefix:22a1f13b90b0,41845,1731361431777 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK], DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK]] 2024-11-11T21:44:31,765 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1390f962e52fc2f0ff7949a317fc2221 in 
444ms, sequenceid=21, compaction requested=false 2024-11-11T21:44:31,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1390f962e52fc2f0ff7949a317fc2221: 2024-11-11T21:44:31,766 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-11T21:44:31,766 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:44:31,768 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/3617734395084bfaaf4f8f1cbd97b561 because midkey is the same as first or last row 2024-11-11T21:44:33,527 INFO [FSHLog-0-hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0-prefix:22a1f13b90b0,41845,1731361431777 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK], DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK]] 2024-11-11T21:44:33,978 INFO [master/22a1f13b90b0:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-11T21:44:33,978 INFO [master/22a1f13b90b0:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-11T21:44:35,736 INFO [FSHLog-0-hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0-prefix:22a1f13b90b0,41845,1731361431777 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK], DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK]] 2024-11-11T21:44:35,741 WARN [FSHLog-0-hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0-prefix:22a1f13b90b0,41845,1731361431777 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK], DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK]] 2024-11-11T21:44:35,744 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 22a1f13b90b0%2C41845%2C1731361431777:(num 1731361464279) roll requested 2024-11-11T21:44:35,745 INFO [regionserver/22a1f13b90b0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C41845%2C1731361431777.1731361475744 2024-11-11T21:44:35,957 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 210 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK], DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK]] 2024-11-11T21:44:35,957 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:44:35,958 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:44:35,958 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:44:35,958 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:44:35,958 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
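The WARN at 21:44:35,741 spells out the rolling trigger: eight consecutive syncs crossed the slow-sync cost threshold, which is more than the allowed count of five, so a log roll is requested; later in this run a single sync over 5000 ms triggers the same request on its own. The sketch below captures that decision with hard-coded limits; the constants and the reset-on-fast-sync behaviour are assumptions for illustration, and this is not FSHLog's actual implementation.

    import java.util.concurrent.atomic.AtomicInteger;

    // Minimal sketch of the two roll triggers seen in the WARN lines above
    // ("count=8, threshold=5" and, later, "time=5013 ms, threshold=5000 ms").
    public class SlowSyncRollPolicy {
        static final long SLOW_SYNC_MS = 100;         // a sync slower than this counts as "slow" (assumed)
        static final long ROLL_ON_SYNC_MS = 5000;     // one sync slower than this forces a roll
        static final int  SLOW_SYNC_COUNT_LIMIT = 5;  // this many slow syncs forces a roll

        private final AtomicInteger slowSyncCount = new AtomicInteger();

        /** Returns true when the WAL should be rolled onto a new file. */
        boolean onSyncCompleted(long syncMillis) {
            if (syncMillis >= ROLL_ON_SYNC_MS) {
                return true;                                  // time-based trigger
            }
            if (syncMillis >= SLOW_SYNC_MS) {
                return slowSyncCount.incrementAndGet() > SLOW_SYNC_COUNT_LIMIT; // count-based trigger
            }
            slowSyncCount.set(0);                             // a fast sync resets the streak (assumed)
            return false;
        }

        public static void main(String[] args) {
            SlowSyncRollPolicy policy = new SlowSyncRollPolicy();
            long[] costs = {208, 201, 201, 202, 201, 202, 202, 202};   // sample costs like those logged above
            for (long c : costs) {
                System.out.println("sync took " + c + " ms -> roll requested: " + policy.onSyncCompleted(c));
            }
        }
    }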
2024-11-11T21:44:35,959 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361464279 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361475744 2024-11-11T21:44:35,960 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45669:45669),(127.0.0.1/127.0.0.1:43441:43441)] 2024-11-11T21:44:35,960 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361464279 is not closed yet, will try archiving it next time 2024-11-11T21:44:35,961 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361444197 to hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/oldWALs/22a1f13b90b0%2C41845%2C1731361431777.1731361444197 2024-11-11T21:44:35,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741839_1015 (size=7739) 2024-11-11T21:44:35,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741839_1015 (size=7739) 2024-11-11T21:44:37,941 INFO [FSHLog-0-hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0-prefix:22a1f13b90b0,41845,1731361431777 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK], DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK]] 2024-11-11T21:44:39,569 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1390f962e52fc2f0ff7949a317fc2221, had cached 0 bytes from a total of 25018 2024-11-11T21:44:40,146 INFO [FSHLog-0-hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0-prefix:22a1f13b90b0,41845,1731361431777 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK], DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK]] 2024-11-11T21:44:42,355 INFO [FSHLog-0-hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0-prefix:22a1f13b90b0,41845,1731361431777 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK], DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK]] 2024-11-11T21:44:44,562 INFO [FSHLog-0-hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0-prefix:22a1f13b90b0,41845,1731361431777 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK], 
DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK]] 2024-11-11T21:44:46,564 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T21:44:46,565 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C41845%2C1731361431777.1731361486565 2024-11-11T21:44:50,073 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T21:44:51,581 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5013 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK], DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK]] 2024-11-11T21:44:51,583 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5013 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK], DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK]] 2024-11-11T21:44:51,583 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 22a1f13b90b0%2C41845%2C1731361431777:(num 1731361486565) roll requested 2024-11-11T21:44:51,584 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:44:51,584 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:44:51,584 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:44:51,584 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:44:51,584 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:44:51,585 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361475744 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361486565 2024-11-11T21:44:51,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741841_1017 (size=4753) 2024-11-11T21:44:51,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741841_1017 (size=4753) 2024-11-11T21:44:51,589 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43441:43441),(127.0.0.1/127.0.0.1:45669:45669)] 2024-11-11T21:44:51,589 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361475744 is not closed yet, will try archiving it next time 2024-11-11T21:44:51,590 INFO [regionserver/22a1f13b90b0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C41845%2C1731361431777.1731361491589 2024-11-11T21:44:56,593 INFO [FSHLog-0-hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0-prefix:22a1f13b90b0,41845,1731361431777 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK], DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK]] 2024-11-11T21:44:56,593 WARN [FSHLog-0-hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0-prefix:22a1f13b90b0,41845,1731361431777 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK], DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK]] 2024-11-11T21:44:56,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41845 {}] regionserver.HRegion(8855): Flush requested on 1390f962e52fc2f0ff7949a317fc2221 2024-11-11T21:44:56,593 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1390f962e52fc2f0ff7949a317fc2221 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T21:44:56,601 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK], DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK]] 2024-11-11T21:44:56,602 WARN [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK], DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK]] 2024-11-11T21:44:58,594 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T21:45:01,597 INFO [FSHLog-0-hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0-prefix:22a1f13b90b0,41845,1731361431777 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK], DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK]] 2024-11-11T21:45:01,597 WARN [FSHLog-0-hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0-prefix:22a1f13b90b0,41845,1731361431777 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK], DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK]] 2024-11-11T21:45:01,597 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:01,598 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:01,598 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:01,598 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:01,599 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:01,600 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361486565 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361491589 2024-11-11T21:45:01,602 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45669:45669),(127.0.0.1/127.0.0.1:43441:43441)] 2024-11-11T21:45:01,602 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361486565 is not closed yet, will try archiving it next time 2024-11-11T21:45:01,602 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 22a1f13b90b0%2C41845%2C1731361431777:(num 1731361491589) roll requested 2024-11-11T21:45:01,603 INFO [regionserver/22a1f13b90b0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C41845%2C1731361431777.1731361501603 2024-11-11T21:45:01,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741842_1018 (size=1569) 2024-11-11T21:45:01,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741842_1018 (size=1569) 2024-11-11T21:45:01,607 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/.tmp/info/8b44bc83509f42588b5bb7cf424ba318 is 1080, key is row0015/info:/1731361473323/Put/seqid=0 2024-11-11T21:45:01,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741844_1020 (size=12509) 2024-11-11T21:45:01,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741844_1020 (size=12509) 2024-11-11T21:45:01,614 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/.tmp/info/8b44bc83509f42588b5bb7cf424ba318 2024-11-11T21:45:01,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/.tmp/info/8b44bc83509f42588b5bb7cf424ba318 as hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/8b44bc83509f42588b5bb7cf424ba318 2024-11-11T21:45:01,634 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/8b44bc83509f42588b5bb7cf424ba318, entries=7, sequenceid=31, filesize=12.2 K 2024-11-11T21:45:06,618 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5012 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK], DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK]] 2024-11-11T21:45:06,618 WARN [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5012 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK], DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK]] 2024-11-11T21:45:06,637 INFO [FSHLog-0-hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0-prefix:22a1f13b90b0,41845,1731361431777 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK], DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK]] 2024-11-11T21:45:06,637 WARN [FSHLog-0-hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0-prefix:22a1f13b90b0,41845,1731361431777 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39997,DS-de7b2749-e7b5-4a2d-92d9-e74f901847db,DISK], DatanodeInfoWithStorage[127.0.0.1:40991,DS-df71fc79-3fab-4fca-a28f-ead44ec4e7d8,DISK]] 2024-11-11T21:45:06,638 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1390f962e52fc2f0ff7949a317fc2221 in 10044ms, sequenceid=31, compaction requested=true 2024-11-11T21:45:06,638 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:06,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1390f962e52fc2f0ff7949a317fc2221: 2024-11-11T21:45:06,638 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:06,638 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:06,638 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-11T21:45:06,638 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:45:06,639 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:06,639 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/3617734395084bfaaf4f8f1cbd97b561 because midkey is the same as first or last row 2024-11-11T21:45:06,639 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:06,640 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361491589 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361501603 2024-11-11T21:45:06,642 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:45669:45669),(127.0.0.1/127.0.0.1:43441:43441)] 2024-11-11T21:45:06,642 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361491589 is not closed yet, will try archiving it next time 2024-11-11T21:45:06,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1390f962e52fc2f0ff7949a317fc2221:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T21:45:06,643 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361464279 to hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/oldWALs/22a1f13b90b0%2C41845%2C1731361431777.1731361464279 2024-11-11T21:45:06,643 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 22a1f13b90b0%2C41845%2C1731361431777:(num 1731361506643) roll requested 2024-11-11T21:45:06,643 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C41845%2C1731361431777.1731361506643 2024-11-11T21:45:06,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741843_1019 (size=438) 2024-11-11T21:45:06,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741843_1019 (size=438) 2024-11-11T21:45:06,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:45:06,646 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361475744 to hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/oldWALs/22a1f13b90b0%2C41845%2C1731361431777.1731361475744 2024-11-11T21:45:06,647 DEBUG [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T21:45:06,648 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361486565 to hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/oldWALs/22a1f13b90b0%2C41845%2C1731361431777.1731361486565 2024-11-11T21:45:06,650 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361491589 to hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/oldWALs/22a1f13b90b0%2C41845%2C1731361431777.1731361491589 2024-11-11T21:45:06,651 DEBUG [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations 
with 1 in ratio 2024-11-11T21:45:06,652 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:06,652 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:06,652 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:06,652 DEBUG [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] regionserver.HStore(1541): 1390f962e52fc2f0ff7949a317fc2221/info is initiating minor compaction (all files) 2024-11-11T21:45:06,652 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:06,653 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:06,653 INFO [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1390f962e52fc2f0ff7949a317fc2221/info in TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221. 2024-11-11T21:45:06,653 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361501603 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361506643 2024-11-11T21:45:06,653 INFO [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/3617734395084bfaaf4f8f1cbd97b561, hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/3eba083c0a454d8689ecea3e2ce119d6, hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/8b44bc83509f42588b5bb7cf424ba318] into tmpdir=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/.tmp, totalSize=36.6 K 2024-11-11T21:45:06,654 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45669:45669),(127.0.0.1/127.0.0.1:43441:43441)] 2024-11-11T21:45:06,654 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361501603 is not closed yet, will try archiving it next time 2024-11-11T21:45:06,654 INFO [regionserver/22a1f13b90b0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C41845%2C1731361431777.1731361506654 2024-11-11T21:45:06,655 DEBUG [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3617734395084bfaaf4f8f1cbd97b561, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731361444222 2024-11-11T21:45:06,656 DEBUG [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3eba083c0a454d8689ecea3e2ce119d6, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731361458262 2024-11-11T21:45:06,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:40991 is added to blk_1073741845_1021 (size=93) 2024-11-11T21:45:06,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741845_1021 (size=93) 2024-11-11T21:45:06,658 DEBUG [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8b44bc83509f42588b5bb7cf424ba318, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731361473323 2024-11-11T21:45:06,658 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361501603 to hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/oldWALs/22a1f13b90b0%2C41845%2C1731361431777.1731361501603 2024-11-11T21:45:06,667 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:06,667 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:06,668 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:06,668 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:06,668 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:06,668 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361506643 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361506654 2024-11-11T21:45:06,669 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43441:43441),(127.0.0.1/127.0.0.1:45669:45669)] 2024-11-11T21:45:06,669 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/WALs/22a1f13b90b0,41845,1731361431777/22a1f13b90b0%2C41845%2C1731361431777.1731361506643 is not closed yet, will try archiving it next time 2024-11-11T21:45:06,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741846_1022 (size=1258) 2024-11-11T21:45:06,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741846_1022 (size=1258) 2024-11-11T21:45:06,689 INFO [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1390f962e52fc2f0ff7949a317fc2221#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T21:45:06,691 DEBUG [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/.tmp/info/84ab2b89ef824ff8b9f3176ed29f7b33 is 1080, key is row0001/info:/1731361444222/Put/seqid=0 2024-11-11T21:45:06,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741848_1024 (size=27710) 2024-11-11T21:45:06,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741848_1024 (size=27710) 2024-11-11T21:45:06,708 DEBUG [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/.tmp/info/84ab2b89ef824ff8b9f3176ed29f7b33 as hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/84ab2b89ef824ff8b9f3176ed29f7b33 2024-11-11T21:45:06,724 INFO [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1390f962e52fc2f0ff7949a317fc2221/info of 1390f962e52fc2f0ff7949a317fc2221 into 84ab2b89ef824ff8b9f3176ed29f7b33(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T21:45:06,724 DEBUG [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1390f962e52fc2f0ff7949a317fc2221: 2024-11-11T21:45:06,725 INFO [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221., storeName=1390f962e52fc2f0ff7949a317fc2221/info, priority=13, startTime=1731361506642; duration=0sec 2024-11-11T21:45:06,726 DEBUG [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-11T21:45:06,726 DEBUG [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:45:06,726 DEBUG [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/84ab2b89ef824ff8b9f3176ed29f7b33 because midkey is the same as first or last row 2024-11-11T21:45:06,726 DEBUG [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-11T21:45:06,726 DEBUG [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:45:06,726 DEBUG [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/84ab2b89ef824ff8b9f3176ed29f7b33 because midkey is the same as first or last row 2024-11-11T21:45:06,726 DEBUG [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-11T21:45:06,726 DEBUG [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:45:06,726 DEBUG [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/84ab2b89ef824ff8b9f3176ed29f7b33 because midkey is the same as first or last row 2024-11-11T21:45:06,726 DEBUG [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:45:06,727 DEBUG [RS:0;22a1f13b90b0:41845-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1390f962e52fc2f0ff7949a317fc2221:info 2024-11-11T21:45:18,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41845 {}] regionserver.HRegion(8855): Flush requested on 1390f962e52fc2f0ff7949a317fc2221 2024-11-11T21:45:18,696 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1390f962e52fc2f0ff7949a317fc2221 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T21:45:18,702 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/.tmp/info/4de30fd557754575b2c66624b8367df0 is 1080, key is row0022/info:/1731361506656/Put/seqid=0 2024-11-11T21:45:18,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741849_1025 (size=12509) 2024-11-11T21:45:18,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741849_1025 (size=12509) 2024-11-11T21:45:18,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/.tmp/info/4de30fd557754575b2c66624b8367df0 2024-11-11T21:45:18,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/.tmp/info/4de30fd557754575b2c66624b8367df0 as hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/4de30fd557754575b2c66624b8367df0 2024-11-11T21:45:18,728 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/4de30fd557754575b2c66624b8367df0, entries=7, sequenceid=42, filesize=12.2 K 2024-11-11T21:45:18,730 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1390f962e52fc2f0ff7949a317fc2221 in 34ms, sequenceid=42, compaction requested=false 2024-11-11T21:45:18,730 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1390f962e52fc2f0ff7949a317fc2221: 2024-11-11T21:45:18,730 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-11T21:45:18,730 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:45:18,730 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/84ab2b89ef824ff8b9f3176ed29f7b33 because midkey is the same as first or last row 2024-11-11T21:45:20,073 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T21:45:24,570 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1390f962e52fc2f0ff7949a317fc2221, had cached 0 bytes from a total of 40219 2024-11-11T21:45:26,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T21:45:26,710 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
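The split-policy DEBUG lines above reduce to two checks: the summed store file size (here 27710 B + 12509 B, about 39.3 K) exceeds the 16.0 K sizeToCheck, so a split looks warranted, but the candidate midkey equals the first or last row, so there is no usable split point. Below is a small sketch of those two checks, using the sizes from this log but otherwise made-up inputs; it is not the real ConstantSizeRegionSplitPolicy or StoreUtils code.

    import java.util.List;

    // Sketch of the "should split" size check and the midkey sanity check logged above.
    public class SplitCheckSketch {
        static boolean shouldSplit(List<Long> storeFileSizes, long sizeToCheckBytes) {
            long sumSize = storeFileSizes.stream().mapToLong(Long::longValue).sum();
            System.out.printf("sumSize=%.1f K, sizeToCheck=%.1f K%n",
                    sumSize / 1024.0, sizeToCheckBytes / 1024.0);
            return sumSize > sizeToCheckBytes;
        }

        static boolean canSplitAt(String midkey, String firstKey, String lastKey) {
            // "cannot split ... because midkey is the same as first or last row":
            // splitting there would leave one daughter region effectively empty.
            return !(midkey.equals(firstKey) || midkey.equals(lastKey));
        }

        public static void main(String[] args) {
            boolean big = shouldSplit(List.of(27710L, 12509L), 16L * 1024);   // sizes taken from this log
            boolean usableMidkey = canSplitAt("row0001", "row0001", "row0029"); // illustrative keys
            System.out.println("should split: " + big + ", usable midkey: " + usableMidkey);
        }
    }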
2024-11-11T21:45:26,710 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:45:26,718 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:45:26,719 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:45:26,719 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-11T21:45:26,720 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T21:45:26,720 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=856553958, stopped=false 2024-11-11T21:45:26,720 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=22a1f13b90b0,45429,1731361431028 2024-11-11T21:45:26,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T21:45:26,723 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41845-0x10030be36e10001, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T21:45:26,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:26,723 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41845-0x10030be36e10001, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:26,723 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T21:45:26,723 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T21:45:26,723 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:45:26,723 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:45:26,723 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:45:26,723 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41845-0x10030be36e10001, quorum=127.0.0.1:50357, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:45:26,724 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '22a1f13b90b0,41845,1731361431777' ***** 2024-11-11T21:45:26,724 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T21:45:26,724 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T21:45:26,724 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T21:45:26,724 INFO [RS:0;22a1f13b90b0:41845 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T21:45:26,725 INFO [RS:0;22a1f13b90b0:41845 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T21:45:26,725 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.HRegionServer(3091): Received CLOSE for 1390f962e52fc2f0ff7949a317fc2221 2024-11-11T21:45:26,725 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.HRegionServer(959): stopping server 22a1f13b90b0,41845,1731361431777 2024-11-11T21:45:26,725 INFO [RS:0;22a1f13b90b0:41845 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T21:45:26,725 INFO [RS:0;22a1f13b90b0:41845 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;22a1f13b90b0:41845. 
2024-11-11T21:45:26,725 DEBUG [RS:0;22a1f13b90b0:41845 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:45:26,725 DEBUG [RS:0;22a1f13b90b0:41845 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:45:26,726 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 1390f962e52fc2f0ff7949a317fc2221, disabling compactions & flushes 2024-11-11T21:45:26,726 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T21:45:26,726 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T21:45:26,726 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221. 2024-11-11T21:45:26,726 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T21:45:26,726 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221. 2024-11-11T21:45:26,726 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T21:45:26,726 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221. after waiting 0 ms 2024-11-11T21:45:26,726 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221. 
2024-11-11T21:45:26,726 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-11T21:45:26,726 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 1390f962e52fc2f0ff7949a317fc2221 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-11T21:45:26,726 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T21:45:26,726 DEBUG [RS:0;22a1f13b90b0:41845 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 1390f962e52fc2f0ff7949a317fc2221=TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221.} 2024-11-11T21:45:26,726 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T21:45:26,726 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T21:45:26,726 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T21:45:26,726 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T21:45:26,727 DEBUG [RS:0;22a1f13b90b0:41845 {}] regionserver.HRegionServer(1351): Waiting on 1390f962e52fc2f0ff7949a317fc2221, 1588230740 2024-11-11T21:45:26,727 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-11T21:45:26,733 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/.tmp/info/e47ca085ecda4b5daf9187b526550059 is 1080, key is row0029/info:/1731361520698/Put/seqid=0 2024-11-11T21:45:26,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741850_1026 (size=8193) 2024-11-11T21:45:26,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741850_1026 (size=8193) 2024-11-11T21:45:26,740 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/.tmp/info/e47ca085ecda4b5daf9187b526550059 2024-11-11T21:45:26,750 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/.tmp/info/e47ca085ecda4b5daf9187b526550059 as 
hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/e47ca085ecda4b5daf9187b526550059 2024-11-11T21:45:26,751 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740/.tmp/info/786682000c424b2c81cdd901ea3640ae is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221./info:regioninfo/1731361434613/Put/seqid=0 2024-11-11T21:45:26,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741851_1027 (size=7016) 2024-11-11T21:45:26,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741851_1027 (size=7016) 2024-11-11T21:45:26,758 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740/.tmp/info/786682000c424b2c81cdd901ea3640ae 2024-11-11T21:45:26,760 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/e47ca085ecda4b5daf9187b526550059, entries=3, sequenceid=48, filesize=8.0 K 2024-11-11T21:45:26,762 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1390f962e52fc2f0ff7949a317fc2221 in 35ms, sequenceid=48, compaction requested=true 2024-11-11T21:45:26,762 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/3617734395084bfaaf4f8f1cbd97b561, hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/3eba083c0a454d8689ecea3e2ce119d6, hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/8b44bc83509f42588b5bb7cf424ba318] to archive 2024-11-11T21:45:26,765 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-11T21:45:26,768 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/3617734395084bfaaf4f8f1cbd97b561 to hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/archive/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/3617734395084bfaaf4f8f1cbd97b561 2024-11-11T21:45:26,770 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/3eba083c0a454d8689ecea3e2ce119d6 to hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/archive/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/3eba083c0a454d8689ecea3e2ce119d6 2024-11-11T21:45:26,772 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/8b44bc83509f42588b5bb7cf424ba318 to hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/archive/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/info/8b44bc83509f42588b5bb7cf424ba318 2024-11-11T21:45:26,785 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740/.tmp/ns/9e4bcdfc75f5499bb8a286ebf5683a09 is 43, key is default/ns:d/1731361433813/Put/seqid=0 2024-11-11T21:45:26,784 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=22a1f13b90b0:45429 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-11T21:45:26,788 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [3617734395084bfaaf4f8f1cbd97b561=12509, 3eba083c0a454d8689ecea3e2ce119d6=12509, 8b44bc83509f42588b5bb7cf424ba318=12509] 2024-11-11T21:45:26,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741852_1028 (size=5153) 2024-11-11T21:45:26,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741852_1028 (size=5153) 2024-11-11T21:45:26,792 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740/.tmp/ns/9e4bcdfc75f5499bb8a286ebf5683a09 2024-11-11T21:45:26,794 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/default/TestLogRolling-testSlowSyncLogRolling/1390f962e52fc2f0ff7949a317fc2221/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-11T21:45:26,797 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221. 2024-11-11T21:45:26,797 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 1390f962e52fc2f0ff7949a317fc2221: Waiting for close lock at 1731361526725Running coprocessor pre-close hooks at 1731361526725Disabling compacts and flushes for region at 1731361526725Disabling writes for close at 1731361526726 (+1 ms)Obtaining lock to block concurrent updates at 1731361526726Preparing flush snapshotting stores in 1390f962e52fc2f0ff7949a317fc2221 at 1731361526726Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731361526727 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221. at 1731361526728 (+1 ms)Flushing 1390f962e52fc2f0ff7949a317fc2221/info: creating writer at 1731361526728Flushing 1390f962e52fc2f0ff7949a317fc2221/info: appending metadata at 1731361526732 (+4 ms)Flushing 1390f962e52fc2f0ff7949a317fc2221/info: closing flushed file at 1731361526732Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e1a282c: reopening flushed file at 1731361526749 (+17 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1390f962e52fc2f0ff7949a317fc2221 in 35ms, sequenceid=48, compaction requested=true at 1731361526762 (+13 ms)Writing region close event to WAL at 1731361526789 (+27 ms)Running coprocessor post-close hooks at 1731361526795 (+6 ms)Closed at 1731361526797 (+2 ms) 2024-11-11T21:45:26,798 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731361434126.1390f962e52fc2f0ff7949a317fc2221. 
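For context on the cells flushed above (key row0029/info:, dataSize ~3.15 KB): such rows are written through the ordinary HBase client API earlier in the test, and on region close the memstore is flushed to a .tmp HFile and committed into the info store, exactly as the DEBUG lines record. The snippet below is only an illustrative sketch — the connection setup, qualifier and value are assumptions; just the table name, the row-key pattern and the 'info' family are visible in the log:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ExampleInfoFamilyWriter {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
      // Each Put lands in the region's memstore; closing the region flushes the memstore to a
      // temporary HFile under .tmp/ and then commits it into the info/ store directory.
      Put put = new Put(Bytes.toBytes("row0029"));
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      table.put(put);
    }
  }
}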
2024-11-11T21:45:26,816 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740/.tmp/table/b527a3be09e146668b5989bdea988d6a is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731361434642/Put/seqid=0 2024-11-11T21:45:26,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741853_1029 (size=5396) 2024-11-11T21:45:26,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741853_1029 (size=5396) 2024-11-11T21:45:26,822 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740/.tmp/table/b527a3be09e146668b5989bdea988d6a 2024-11-11T21:45:26,831 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740/.tmp/info/786682000c424b2c81cdd901ea3640ae as hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740/info/786682000c424b2c81cdd901ea3640ae 2024-11-11T21:45:26,841 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740/info/786682000c424b2c81cdd901ea3640ae, entries=10, sequenceid=11, filesize=6.9 K 2024-11-11T21:45:26,843 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740/.tmp/ns/9e4bcdfc75f5499bb8a286ebf5683a09 as hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740/ns/9e4bcdfc75f5499bb8a286ebf5683a09 2024-11-11T21:45:26,852 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740/ns/9e4bcdfc75f5499bb8a286ebf5683a09, entries=2, sequenceid=11, filesize=5.0 K 2024-11-11T21:45:26,853 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740/.tmp/table/b527a3be09e146668b5989bdea988d6a as hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740/table/b527a3be09e146668b5989bdea988d6a 2024-11-11T21:45:26,861 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740/table/b527a3be09e146668b5989bdea988d6a, entries=2, sequenceid=11, filesize=5.3 K 2024-11-11T21:45:26,863 INFO 
[RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 135ms, sequenceid=11, compaction requested=false 2024-11-11T21:45:26,868 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-11T21:45:26,869 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T21:45:26,870 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T21:45:26,870 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731361526726Running coprocessor pre-close hooks at 1731361526726Disabling compacts and flushes for region at 1731361526726Disabling writes for close at 1731361526726Obtaining lock to block concurrent updates at 1731361526727 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731361526727Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731361526727Flushing stores of hbase:meta,,1.1588230740 at 1731361526728 (+1 ms)Flushing 1588230740/info: creating writer at 1731361526728Flushing 1588230740/info: appending metadata at 1731361526750 (+22 ms)Flushing 1588230740/info: closing flushed file at 1731361526750Flushing 1588230740/ns: creating writer at 1731361526766 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731361526785 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1731361526785Flushing 1588230740/table: creating writer at 1731361526800 (+15 ms)Flushing 1588230740/table: appending metadata at 1731361526815 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731361526815Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c223fd0: reopening flushed file at 1731361526830 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e8bb74b: reopening flushed file at 1731361526842 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30241aff: reopening flushed file at 1731361526852 (+10 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 135ms, sequenceid=11, compaction requested=false at 1731361526863 (+11 ms)Writing region close event to WAL at 1731361526864 (+1 ms)Running coprocessor post-close hooks at 1731361526869 (+5 ms)Closed at 1731361526869 2024-11-11T21:45:26,870 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T21:45:26,927 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.HRegionServer(976): stopping server 22a1f13b90b0,41845,1731361431777; all regions closed. 
2024-11-11T21:45:26,929 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:26,929 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:26,929 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:26,929 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:26,930 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:26,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741834_1010 (size=3066) 2024-11-11T21:45:26,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741834_1010 (size=3066) 2024-11-11T21:45:26,939 DEBUG [RS:0;22a1f13b90b0:41845 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/oldWALs 2024-11-11T21:45:26,939 INFO [RS:0;22a1f13b90b0:41845 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 22a1f13b90b0%2C41845%2C1731361431777.meta:.meta(num 1731361433636) 2024-11-11T21:45:26,940 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:26,940 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:26,940 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:26,940 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:26,940 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:26,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741847_1023 (size=12695) 2024-11-11T21:45:26,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741847_1023 (size=12695) 2024-11-11T21:45:26,947 DEBUG [RS:0;22a1f13b90b0:41845 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/oldWALs 2024-11-11T21:45:26,947 INFO [RS:0;22a1f13b90b0:41845 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 22a1f13b90b0%2C41845%2C1731361431777:(num 1731361506654) 2024-11-11T21:45:26,947 DEBUG [RS:0;22a1f13b90b0:41845 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:45:26,947 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T21:45:26,947 INFO [RS:0;22a1f13b90b0:41845 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T21:45:26,947 INFO [RS:0;22a1f13b90b0:41845 {}] hbase.ChoreService(370): Chore service for: regionserver/22a1f13b90b0:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T21:45:26,947 INFO [RS:0;22a1f13b90b0:41845 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T21:45:26,947 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T21:45:26,948 INFO [RS:0;22a1f13b90b0:41845 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41845 2024-11-11T21:45:26,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T21:45:26,951 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41845-0x10030be36e10001, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/22a1f13b90b0,41845,1731361431777 2024-11-11T21:45:26,951 INFO [RS:0;22a1f13b90b0:41845 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T21:45:26,952 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [22a1f13b90b0,41845,1731361431777] 2024-11-11T21:45:26,953 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/22a1f13b90b0,41845,1731361431777 already deleted, retry=false 2024-11-11T21:45:26,953 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 22a1f13b90b0,41845,1731361431777 expired; onlineServers=0 2024-11-11T21:45:26,953 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '22a1f13b90b0,45429,1731361431028' ***** 2024-11-11T21:45:26,953 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T21:45:26,954 INFO [M:0;22a1f13b90b0:45429 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T21:45:26,954 INFO [M:0;22a1f13b90b0:45429 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T21:45:26,954 DEBUG [M:0;22a1f13b90b0:45429 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T21:45:26,954 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
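The ZKWatcher events above (NodeDeleted on /hbase/rs/22a1f13b90b0,41845,1731361431777 and NodeChildrenChanged on /hbase/rs) are how the master's RegionServerTracker learns that the region server's ephemeral znode is gone and marks the server as expired. A minimal, stand-alone sketch of the same watch pattern with the plain ZooKeeper client follows — the quorum string is taken from the log, everything else is assumed, and this is not HBase's own ZKWatcher code:

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RegionServerZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Session-level watcher: just prints connection-state events.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:50357", 30_000,
        (WatchedEvent e) -> System.out.println("session event: " + e));

    // One-shot child watch on /hbase/rs: fires with NodeChildrenChanged when a region
    // server's ephemeral node appears or disappears, mirroring the events logged above.
    Watcher childWatcher = e -> {
      if (e.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
        System.out.println("region server list changed under " + e.getPath());
      }
    };
    List<String> servers = zk.getChildren("/hbase/rs", childWatcher);
    System.out.println("current region servers: " + servers);
  }
}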
2024-11-11T21:45:26,954 DEBUG [M:0;22a1f13b90b0:45429 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T21:45:26,954 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361432884 {}] cleaner.HFileCleaner(306): Exit Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361432884,5,FailOnTimeoutGroup] 2024-11-11T21:45:26,954 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361432880 {}] cleaner.HFileCleaner(306): Exit Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361432880,5,FailOnTimeoutGroup] 2024-11-11T21:45:26,954 INFO [M:0;22a1f13b90b0:45429 {}] hbase.ChoreService(370): Chore service for: master/22a1f13b90b0:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T21:45:26,954 INFO [M:0;22a1f13b90b0:45429 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T21:45:26,954 DEBUG [M:0;22a1f13b90b0:45429 {}] master.HMaster(1795): Stopping service threads 2024-11-11T21:45:26,954 INFO [M:0;22a1f13b90b0:45429 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T21:45:26,955 INFO [M:0;22a1f13b90b0:45429 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T21:45:26,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T21:45:26,955 INFO [M:0;22a1f13b90b0:45429 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T21:45:26,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:26,955 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-11T21:45:26,956 DEBUG [M:0;22a1f13b90b0:45429 {}] zookeeper.ZKUtil(347): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T21:45:26,956 WARN [M:0;22a1f13b90b0:45429 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T21:45:26,956 INFO [M:0;22a1f13b90b0:45429 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/.lastflushedseqids 2024-11-11T21:45:26,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741854_1030 (size=130) 2024-11-11T21:45:26,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741854_1030 (size=130) 2024-11-11T21:45:26,968 INFO [M:0;22a1f13b90b0:45429 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T21:45:26,968 INFO [M:0;22a1f13b90b0:45429 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T21:45:26,969 DEBUG [M:0;22a1f13b90b0:45429 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T21:45:26,969 INFO [M:0;22a1f13b90b0:45429 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:45:26,969 DEBUG [M:0;22a1f13b90b0:45429 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:45:26,969 DEBUG [M:0;22a1f13b90b0:45429 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T21:45:26,969 DEBUG [M:0;22a1f13b90b0:45429 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T21:45:26,969 INFO [M:0;22a1f13b90b0:45429 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-11-11T21:45:26,988 DEBUG [M:0;22a1f13b90b0:45429 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/46f75ab6b137441b9c0d9499ad7fce69 is 82, key is hbase:meta,,1/info:regioninfo/1731361433733/Put/seqid=0 2024-11-11T21:45:26,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741855_1031 (size=5672) 2024-11-11T21:45:26,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741855_1031 (size=5672) 2024-11-11T21:45:26,994 INFO [M:0;22a1f13b90b0:45429 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/46f75ab6b137441b9c0d9499ad7fce69 2024-11-11T21:45:27,017 DEBUG [M:0;22a1f13b90b0:45429 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/98ccfd81a95d4a8a9692fe1d5809b53e is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731361434651/Put/seqid=0 2024-11-11T21:45:27,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741856_1032 (size=6248) 2024-11-11T21:45:27,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741856_1032 (size=6248) 2024-11-11T21:45:27,023 INFO [M:0;22a1f13b90b0:45429 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/98ccfd81a95d4a8a9692fe1d5809b53e 2024-11-11T21:45:27,028 INFO [M:0;22a1f13b90b0:45429 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 98ccfd81a95d4a8a9692fe1d5809b53e 2024-11-11T21:45:27,044 DEBUG [M:0;22a1f13b90b0:45429 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/40706b8c8a1b473b8977c694d2d27caa is 69, key is 22a1f13b90b0,41845,1731361431777/rs:state/1731361432943/Put/seqid=0 2024-11-11T21:45:27,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741857_1033 (size=5156) 2024-11-11T21:45:27,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741857_1033 (size=5156) 2024-11-11T21:45:27,050 INFO [M:0;22a1f13b90b0:45429 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), 
to=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/40706b8c8a1b473b8977c694d2d27caa 2024-11-11T21:45:27,053 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41845-0x10030be36e10001, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:45:27,053 DEBUG [pool-51-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41845-0x10030be36e10001, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:45:27,053 INFO [RS:0;22a1f13b90b0:41845 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T21:45:27,053 INFO [RS:0;22a1f13b90b0:41845 {}] regionserver.HRegionServer(1031): Exiting; stopping=22a1f13b90b0,41845,1731361431777; zookeeper connection closed. 2024-11-11T21:45:27,054 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@93a3585 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@93a3585 2024-11-11T21:45:27,054 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T21:45:27,056 INFO [regionserver/22a1f13b90b0:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T21:45:27,071 DEBUG [M:0;22a1f13b90b0:45429 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fb89ac2636684b1984244a796eef38f7 is 52, key is load_balancer_on/state:d/1731361434094/Put/seqid=0 2024-11-11T21:45:27,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741858_1034 (size=5056) 2024-11-11T21:45:27,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741858_1034 (size=5056) 2024-11-11T21:45:27,077 INFO [M:0;22a1f13b90b0:45429 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fb89ac2636684b1984244a796eef38f7 2024-11-11T21:45:27,085 DEBUG [M:0;22a1f13b90b0:45429 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/46f75ab6b137441b9c0d9499ad7fce69 as hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/46f75ab6b137441b9c0d9499ad7fce69 2024-11-11T21:45:27,091 INFO [M:0;22a1f13b90b0:45429 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/46f75ab6b137441b9c0d9499ad7fce69, entries=8, sequenceid=59, filesize=5.5 K 2024-11-11T21:45:27,092 DEBUG [M:0;22a1f13b90b0:45429 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/98ccfd81a95d4a8a9692fe1d5809b53e as hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/98ccfd81a95d4a8a9692fe1d5809b53e 2024-11-11T21:45:27,099 INFO [M:0;22a1f13b90b0:45429 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 98ccfd81a95d4a8a9692fe1d5809b53e 2024-11-11T21:45:27,099 INFO [M:0;22a1f13b90b0:45429 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/98ccfd81a95d4a8a9692fe1d5809b53e, entries=6, sequenceid=59, filesize=6.1 K 2024-11-11T21:45:27,100 DEBUG [M:0;22a1f13b90b0:45429 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/40706b8c8a1b473b8977c694d2d27caa as hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/40706b8c8a1b473b8977c694d2d27caa 2024-11-11T21:45:27,107 INFO [M:0;22a1f13b90b0:45429 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/40706b8c8a1b473b8977c694d2d27caa, entries=1, sequenceid=59, filesize=5.0 K 2024-11-11T21:45:27,108 DEBUG [M:0;22a1f13b90b0:45429 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fb89ac2636684b1984244a796eef38f7 as hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fb89ac2636684b1984244a796eef38f7 2024-11-11T21:45:27,114 INFO [M:0;22a1f13b90b0:45429 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fb89ac2636684b1984244a796eef38f7, entries=1, sequenceid=59, filesize=4.9 K 2024-11-11T21:45:27,116 INFO [M:0;22a1f13b90b0:45429 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=59, compaction requested=false 2024-11-11T21:45:27,118 INFO [M:0;22a1f13b90b0:45429 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T21:45:27,118 DEBUG [M:0;22a1f13b90b0:45429 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731361526968Disabling compacts and flushes for region at 1731361526968Disabling writes for close at 1731361526969 (+1 ms)Obtaining lock to block concurrent updates at 1731361526969Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731361526969Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1731361526969Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731361526970 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731361526970Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731361526987 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731361526987Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731361527000 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731361527016 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731361527016Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731361527029 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731361527043 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731361527044 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731361527056 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731361527071 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731361527071Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56265f6c: reopening flushed file at 1731361527083 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a756c16: reopening flushed file at 1731361527091 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@382bfc10: reopening flushed file at 1731361527099 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b2c1142: reopening flushed file at 1731361527107 (+8 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=59, compaction requested=false at 1731361527116 (+9 ms)Writing region close event to WAL at 1731361527118 (+2 ms)Closed at 1731361527118 2024-11-11T21:45:27,119 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:27,119 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:27,119 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:27,119 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:27,119 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:27,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39997 is added to blk_1073741830_1006 (size=27985) 2024-11-11T21:45:27,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40991 is added to blk_1073741830_1006 (size=27985) 2024-11-11T21:45:27,122 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T21:45:27,122 INFO [M:0;22a1f13b90b0:45429 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-11T21:45:27,122 INFO [M:0;22a1f13b90b0:45429 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45429 2024-11-11T21:45:27,123 INFO [M:0;22a1f13b90b0:45429 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T21:45:27,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:45:27,224 INFO [M:0;22a1f13b90b0:45429 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T21:45:27,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45429-0x10030be36e10000, quorum=127.0.0.1:50357, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:45:27,234 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1ca1952e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:45:27,237 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75639b0e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:45:27,237 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:45:27,238 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:45:27,238 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ec7bf2e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/hadoop.log.dir/,STOPPED} 2024-11-11T21:45:27,242 WARN [BP-580669111-172.17.0.2-1731361427964 heartbeating to localhost/127.0.0.1:42807 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T21:45:27,242 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T21:45:27,242 WARN [BP-580669111-172.17.0.2-1731361427964 heartbeating to localhost/127.0.0.1:42807 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-580669111-172.17.0.2-1731361427964 (Datanode Uuid f4f8cb69-1fd7-4c2d-a53b-9e4fb83b2718) service to localhost/127.0.0.1:42807 2024-11-11T21:45:27,242 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T21:45:27,243 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/cluster_0e89ae30-ccf9-d539-b09d-bd8a9a75d5c5/data/data3/current/BP-580669111-172.17.0.2-1731361427964 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:45:27,243 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/cluster_0e89ae30-ccf9-d539-b09d-bd8a9a75d5c5/data/data4/current/BP-580669111-172.17.0.2-1731361427964 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:45:27,243 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T21:45:27,245 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7ca8488f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:45:27,246 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@dc1ca4f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:45:27,246 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:45:27,246 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:45:27,246 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60d13ec7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/hadoop.log.dir/,STOPPED} 2024-11-11T21:45:27,248 WARN [BP-580669111-172.17.0.2-1731361427964 heartbeating to localhost/127.0.0.1:42807 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T21:45:27,248 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T21:45:27,248 WARN [BP-580669111-172.17.0.2-1731361427964 heartbeating to localhost/127.0.0.1:42807 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-580669111-172.17.0.2-1731361427964 (Datanode Uuid aa07b05a-66fd-4af1-ad17-01ade69c1b07) service to localhost/127.0.0.1:42807 2024-11-11T21:45:27,248 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T21:45:27,248 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/cluster_0e89ae30-ccf9-d539-b09d-bd8a9a75d5c5/data/data1/current/BP-580669111-172.17.0.2-1731361427964 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:45:27,248 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/cluster_0e89ae30-ccf9-d539-b09d-bd8a9a75d5c5/data/data2/current/BP-580669111-172.17.0.2-1731361427964 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:45:27,249 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T21:45:27,257 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T21:45:27,258 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:45:27,258 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:45:27,258 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:45:27,258 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/hadoop.log.dir/,STOPPED} 2024-11-11T21:45:27,266 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T21:45:27,297 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T21:45:27,306 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=76 (was 12) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42807 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:42807 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:42807 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42807 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:42807 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42807 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:42807 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/22a1f13b90b0:0:becomeActiveMaster-MemStoreChunkPool Statistics 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/22a1f13b90b0:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: ForkJoinPool-2-worker-3 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42807 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@59b906bd java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/22a1f13b90b0:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: 
RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=149 (was 402), ProcessCount=11 (was 11), AvailableMemoryMB=5150 (was 5695) 2024-11-11T21:45:27,312 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=77, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=149, ProcessCount=11, AvailableMemoryMB=5149 2024-11-11T21:45:27,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T21:45:27,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/hadoop.log.dir so I do NOT create it in target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0 2024-11-11T21:45:27,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c74827a3-0f61-4530-9706-99ef57c7b54e/hadoop.tmp.dir so I do NOT create it in target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0 2024-11-11T21:45:27,312 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/cluster_193d385d-7517-ce77-37b1-d7cb7ec60e54, deleteOnExit=true 2024-11-11T21:45:27,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T21:45:27,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/test.cache.data in system properties and HBase conf 2024-11-11T21:45:27,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T21:45:27,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/hadoop.log.dir in system properties and HBase conf 2024-11-11T21:45:27,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T21:45:27,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T21:45:27,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T21:45:27,313 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-11T21:45:27,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T21:45:27,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T21:45:27,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T21:45:27,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T21:45:27,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T21:45:27,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T21:45:27,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T21:45:27,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T21:45:27,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T21:45:27,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/nfs.dump.dir in system properties and HBase conf 2024-11-11T21:45:27,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/java.io.tmpdir in system properties and HBase conf 2024-11-11T21:45:27,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T21:45:27,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T21:45:27,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T21:45:27,326 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T21:45:27,377 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:45:27,383 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:45:27,385 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:45:27,385 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:45:27,385 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T21:45:27,386 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:45:27,386 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c83318b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:45:27,386 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e979f8d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:45:27,482 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@521ea93{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/java.io.tmpdir/jetty-localhost-35719-hadoop-hdfs-3_4_1-tests_jar-_-any-8048191604685406239/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T21:45:27,482 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3197ca45{HTTP/1.1, (http/1.1)}{localhost:35719} 2024-11-11T21:45:27,482 INFO [Time-limited test {}] server.Server(415): Started @101882ms 2024-11-11T21:45:27,495 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T21:45:27,547 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:45:27,552 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:45:27,553 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:45:27,553 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:45:27,553 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T21:45:27,554 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@31d619c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:45:27,554 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48bb784e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:45:27,649 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@737d6115{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/java.io.tmpdir/jetty-localhost-39807-hadoop-hdfs-3_4_1-tests_jar-_-any-17880004167488242270/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:45:27,650 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4edee9ab{HTTP/1.1, (http/1.1)}{localhost:39807} 2024-11-11T21:45:27,650 INFO [Time-limited test {}] server.Server(415): Started @102049ms 2024-11-11T21:45:27,651 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T21:45:27,685 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:45:27,689 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:45:27,690 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:45:27,690 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:45:27,690 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T21:45:27,690 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58319158{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:45:27,691 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f11c100{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:45:27,717 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/cluster_193d385d-7517-ce77-37b1-d7cb7ec60e54/data/data2/current/BP-1834037961-172.17.0.2-1731361527338/current, will proceed with Du for space computation calculation, 2024-11-11T21:45:27,717 WARN [Thread-436 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/cluster_193d385d-7517-ce77-37b1-d7cb7ec60e54/data/data1/current/BP-1834037961-172.17.0.2-1731361527338/current, will proceed with Du for space computation calculation, 2024-11-11T21:45:27,733 WARN [Thread-415 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T21:45:27,735 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x91471c98af26ec9 with lease ID 0xd779e62501292069: Processing first storage report for DS-90008b1e-c721-405a-bf5d-8506e5ab6e99 from datanode DatanodeRegistration(127.0.0.1:35783, datanodeUuid=6a149f9f-1c2d-4066-9822-a2b03126f336, infoPort=43993, infoSecurePort=0, ipcPort=38937, storageInfo=lv=-57;cid=testClusterID;nsid=1980875627;c=1731361527338) 2024-11-11T21:45:27,736 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x91471c98af26ec9 with lease ID 0xd779e62501292069: from storage DS-90008b1e-c721-405a-bf5d-8506e5ab6e99 node DatanodeRegistration(127.0.0.1:35783, datanodeUuid=6a149f9f-1c2d-4066-9822-a2b03126f336, infoPort=43993, infoSecurePort=0, ipcPort=38937, storageInfo=lv=-57;cid=testClusterID;nsid=1980875627;c=1731361527338), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:45:27,736 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x91471c98af26ec9 with lease ID 0xd779e62501292069: Processing first storage report for DS-a18ab3fb-0a54-46b3-912c-57665be9e7a4 from datanode DatanodeRegistration(127.0.0.1:35783, datanodeUuid=6a149f9f-1c2d-4066-9822-a2b03126f336, infoPort=43993, infoSecurePort=0, ipcPort=38937, storageInfo=lv=-57;cid=testClusterID;nsid=1980875627;c=1731361527338) 2024-11-11T21:45:27,736 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x91471c98af26ec9 with lease ID 0xd779e62501292069: from storage DS-a18ab3fb-0a54-46b3-912c-57665be9e7a4 node DatanodeRegistration(127.0.0.1:35783, datanodeUuid=6a149f9f-1c2d-4066-9822-a2b03126f336, infoPort=43993, infoSecurePort=0, ipcPort=38937, storageInfo=lv=-57;cid=testClusterID;nsid=1980875627;c=1731361527338), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:45:27,790 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2dd9bfa6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/java.io.tmpdir/jetty-localhost-32885-hadoop-hdfs-3_4_1-tests_jar-_-any-10294132934150248049/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:45:27,791 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2b44e274{HTTP/1.1, (http/1.1)}{localhost:32885} 2024-11-11T21:45:27,791 INFO [Time-limited test {}] server.Server(415): Started @102190ms 2024-11-11T21:45:27,792 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
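The NameNode, DataNode and Jetty contexts recorded above are brought up by HBase's test harness rather than by hand. As a minimal sketch only (class and method names assumed from the hbase-testing-util module this log references, not the actual code of this run), a comparable two-datanode mini DFS cluster can be started like this:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Starts an in-process NameNode plus two DataNodes under the utility's
        // test data directory, which is what produces the per-datanode Jetty
        // web contexts and block reports seen above.
        MiniDFSCluster dfs = util.startMiniDFSCluster(2);
        System.out.println("NameNode URI: " + dfs.getFileSystem().getUri());
        util.shutdownMiniDFSCluster();
      }
    }
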
2024-11-11T21:45:27,855 WARN [Thread-462 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/cluster_193d385d-7517-ce77-37b1-d7cb7ec60e54/data/data3/current/BP-1834037961-172.17.0.2-1731361527338/current, will proceed with Du for space computation calculation, 2024-11-11T21:45:27,855 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/cluster_193d385d-7517-ce77-37b1-d7cb7ec60e54/data/data4/current/BP-1834037961-172.17.0.2-1731361527338/current, will proceed with Du for space computation calculation, 2024-11-11T21:45:27,873 WARN [Thread-451 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T21:45:27,875 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xae2f0faa6738ef7d with lease ID 0xd779e6250129206a: Processing first storage report for DS-4d5a9a92-8f75-4279-b1b6-23553b3d36ba from datanode DatanodeRegistration(127.0.0.1:43827, datanodeUuid=908b6a07-04fc-495d-91a5-b262c44fe147, infoPort=45857, infoSecurePort=0, ipcPort=43379, storageInfo=lv=-57;cid=testClusterID;nsid=1980875627;c=1731361527338) 2024-11-11T21:45:27,875 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xae2f0faa6738ef7d with lease ID 0xd779e6250129206a: from storage DS-4d5a9a92-8f75-4279-b1b6-23553b3d36ba node DatanodeRegistration(127.0.0.1:43827, datanodeUuid=908b6a07-04fc-495d-91a5-b262c44fe147, infoPort=45857, infoSecurePort=0, ipcPort=43379, storageInfo=lv=-57;cid=testClusterID;nsid=1980875627;c=1731361527338), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:45:27,875 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xae2f0faa6738ef7d with lease ID 0xd779e6250129206a: Processing first storage report for DS-7ecba879-2a78-41c5-bff2-661fe233961f from datanode DatanodeRegistration(127.0.0.1:43827, datanodeUuid=908b6a07-04fc-495d-91a5-b262c44fe147, infoPort=45857, infoSecurePort=0, ipcPort=43379, storageInfo=lv=-57;cid=testClusterID;nsid=1980875627;c=1731361527338) 2024-11-11T21:45:27,875 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xae2f0faa6738ef7d with lease ID 0xd779e6250129206a: from storage DS-7ecba879-2a78-41c5-bff2-661fe233961f node DatanodeRegistration(127.0.0.1:43827, datanodeUuid=908b6a07-04fc-495d-91a5-b262c44fe147, infoPort=45857, infoSecurePort=0, ipcPort=43379, storageInfo=lv=-57;cid=testClusterID;nsid=1980875627;c=1731361527338), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:45:27,925 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0 2024-11-11T21:45:27,928 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/cluster_193d385d-7517-ce77-37b1-d7cb7ec60e54/zookeeper_0, clientPort=52772, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/cluster_193d385d-7517-ce77-37b1-d7cb7ec60e54/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/cluster_193d385d-7517-ce77-37b1-d7cb7ec60e54/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T21:45:27,930 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52772 2024-11-11T21:45:27,930 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:45:27,932 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:45:27,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741825_1001 (size=7) 2024-11-11T21:45:27,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43827 is added to blk_1073741825_1001 (size=7) 2024-11-11T21:45:27,945 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598 with version=8 2024-11-11T21:45:27,945 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/hbase-staging 2024-11-11T21:45:27,947 INFO [Time-limited test {}] client.ConnectionUtils(128): master/22a1f13b90b0:0 server-side Connection retries=45 2024-11-11T21:45:27,947 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:45:27,947 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T21:45:27,948 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T21:45:27,948 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:45:27,948 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T21:45:27,948 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T21:45:27,948 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T21:45:27,949 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34009 2024-11-11T21:45:27,950 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34009 connecting to ZooKeeper ensemble=127.0.0.1:52772 2024-11-11T21:45:27,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:340090x0, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T21:45:27,954 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34009-0x10030bfb4e60000 connected 2024-11-11T21:45:27,966 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:45:27,968 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:45:27,971 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:45:27,972 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598, hbase.cluster.distributed=false 2024-11-11T21:45:27,974 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T21:45:27,975 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34009 2024-11-11T21:45:27,975 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34009 2024-11-11T21:45:27,975 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34009 2024-11-11T21:45:27,976 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34009 2024-11-11T21:45:27,976 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34009 2024-11-11T21:45:27,993 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/22a1f13b90b0:0 server-side Connection retries=45 2024-11-11T21:45:27,993 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:45:27,993 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T21:45:27,993 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T21:45:27,993 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:45:27,993 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T21:45:27,993 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T21:45:27,993 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T21:45:27,994 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46783 2024-11-11T21:45:27,996 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46783 connecting to ZooKeeper ensemble=127.0.0.1:52772 2024-11-11T21:45:27,996 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:45:27,998 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:45:28,002 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:467830x0, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T21:45:28,002 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:45:28,002 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46783-0x10030bfb4e60001 connected 2024-11-11T21:45:28,003 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T21:45:28,003 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T21:45:28,004 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T21:45:28,005 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T21:45:28,005 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46783 2024-11-11T21:45:28,005 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46783 2024-11-11T21:45:28,009 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46783 2024-11-11T21:45:28,009 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46783 2024-11-11T21:45:28,009 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46783 
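The RpcExecutor lines above show the call-queue layout the master and regionserver RPC servers start with. A hedged sketch of the standard configuration keys that size these queues follows; the mapping to this run's values (handlerCount=3, maxQueueLength=30, which appears consistent with the default of ten queued calls per handler) is an inference from the log, not taken from the test source:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcQueueSizingSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Number of RPC handler threads; the executors above report handlerCount=3.
        conf.setInt("hbase.regionserver.handler.count", 3);
        // Upper bound on queued calls per call queue (maxQueueLength in the log).
        conf.setInt("hbase.ipc.server.max.callqueue.length", 30);
        System.out.println(conf.getInt("hbase.regionserver.handler.count", -1));
      }
    }
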
2024-11-11T21:45:28,021 DEBUG [M:0;22a1f13b90b0:34009 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;22a1f13b90b0:34009 2024-11-11T21:45:28,021 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/22a1f13b90b0,34009,1731361527947 2024-11-11T21:45:28,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:45:28,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:45:28,023 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/22a1f13b90b0,34009,1731361527947 2024-11-11T21:45:28,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T21:45:28,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:28,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:28,025 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T21:45:28,025 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/22a1f13b90b0,34009,1731361527947 from backup master directory 2024-11-11T21:45:28,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/22a1f13b90b0,34009,1731361527947 2024-11-11T21:45:28,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:45:28,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:45:28,026 WARN [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
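The watcher traffic above is ordinary ZooKeeper znode activity under /hbase (backup-masters, master, running). A minimal sketch of inspecting those znodes with the plain ZooKeeper client, assuming the ephemeral client port 52772 that MiniZooKeeperCluster picked for this particular run:

    import org.apache.zookeeper.ZooKeeper;
    import java.util.List;

    public class ZkPeekSketch {
      public static void main(String[] args) throws Exception {
        // 52772 is the client port reported by MiniZooKeeperCluster earlier in this log.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:52772", 30000, event -> { });
        // Lists children such as master, backup-masters and running once the master is up.
        List<String> children = zk.getChildren("/hbase", false);
        System.out.println("children of /hbase: " + children);
        zk.close();
      }
    }
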
2024-11-11T21:45:28,026 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=22a1f13b90b0,34009,1731361527947 2024-11-11T21:45:28,032 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/hbase.id] with ID: 41c6095d-6ac6-4858-9ef7-4ed411959333 2024-11-11T21:45:28,032 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/.tmp/hbase.id 2024-11-11T21:45:28,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741826_1002 (size=42) 2024-11-11T21:45:28,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43827 is added to blk_1073741826_1002 (size=42) 2024-11-11T21:45:28,044 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/.tmp/hbase.id]:[hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/hbase.id] 2024-11-11T21:45:28,060 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:45:28,060 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T21:45:28,063 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms. 
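At this point the master has registered itself as active and written the cluster ID file. A hedged sketch of confirming both from a client, using the public Admin/ClusterMetrics API against the mini cluster's ZooKeeper quorum (quorum and port values are taken from this run's log; the sketch is illustrative, not part of the test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterStatusSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // ZooKeeper quorum/port of the mini cluster from this run.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "52772");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Should report 22a1f13b90b0,34009,... as the active master and the
          // cluster ID written to hbase.id above.
          System.out.println("active master: " + admin.getClusterMetrics().getMasterName());
          System.out.println("cluster id:    " + admin.getClusterMetrics().getClusterId());
        }
      }
    }
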
2024-11-11T21:45:28,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:28,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:28,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741827_1003 (size=196) 2024-11-11T21:45:28,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43827 is added to blk_1073741827_1003 (size=196) 2024-11-11T21:45:28,073 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T21:45:28,074 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T21:45:28,075 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:45:28,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43827 is added to blk_1073741828_1004 (size=1189) 2024-11-11T21:45:28,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741828_1004 (size=1189) 2024-11-11T21:45:28,086 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store 2024-11-11T21:45:28,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741829_1005 (size=34) 2024-11-11T21:45:28,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43827 is added to blk_1073741829_1005 (size=34) 2024-11-11T21:45:28,096 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:45:28,096 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T21:45:28,096 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:45:28,096 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:45:28,096 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T21:45:28,096 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:45:28,096 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
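The long descriptor above is the local master:store region with its info, proc, rs and state families. Purely as an illustrative reconstruction of that logged descriptor (this is not how MasterRegion builds it internally), an equivalent TableDescriptor can be expressed with the public builder API:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor store = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            // 'info' mirrors the logged family: 3 versions, in-memory, 8 KB blocks,
            // ROW_INDEX_V1 encoding, ROWCOL bloom filter.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .build())
            // 'proc', 'rs' and 'state' keep the default settings shown in the log.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();
        System.out.println(store);
      }
    }
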
2024-11-11T21:45:28,097 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731361528096Disabling compacts and flushes for region at 1731361528096Disabling writes for close at 1731361528096Writing region close event to WAL at 1731361528096Closed at 1731361528096 2024-11-11T21:45:28,098 WARN [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/.initializing 2024-11-11T21:45:28,098 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/WALs/22a1f13b90b0,34009,1731361527947 2024-11-11T21:45:28,101 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C34009%2C1731361527947, suffix=, logDir=hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/WALs/22a1f13b90b0,34009,1731361527947, archiveDir=hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/oldWALs, maxLogs=10 2024-11-11T21:45:28,102 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C34009%2C1731361527947.1731361528102 2024-11-11T21:45:28,108 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/WALs/22a1f13b90b0,34009,1731361527947/22a1f13b90b0%2C34009%2C1731361527947.1731361528102 2024-11-11T21:45:28,109 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43993:43993),(127.0.0.1/127.0.0.1:45857:45857)] 2024-11-11T21:45:28,112 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T21:45:28,112 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:45:28,112 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:28,112 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:28,114 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:28,116 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T21:45:28,116 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:28,117 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:45:28,117 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:28,119 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T21:45:28,119 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:28,119 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:45:28,120 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:28,122 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T21:45:28,122 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:28,123 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:45:28,123 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:28,125 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T21:45:28,125 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:28,125 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:45:28,126 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:28,127 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:28,127 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:28,129 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:28,129 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:28,129 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T21:45:28,131 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:28,133 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T21:45:28,134 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=870019, jitterRate=0.10628743469715118}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T21:45:28,135 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731361528113Initializing all the Stores at 1731361528114 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361528114Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361528114Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361528114Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361528114Cleaning up temporary data from old regions at 1731361528129 (+15 ms)Region opened successfully at 1731361528135 (+6 ms) 2024-11-11T21:45:28,136 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T21:45:28,140 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47aa9fd6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=22a1f13b90b0/172.17.0.2:0 2024-11-11T21:45:28,141 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T21:45:28,141 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T21:45:28,141 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T21:45:28,141 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T21:45:28,142 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T21:45:28,143 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T21:45:28,143 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T21:45:28,145 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T21:45:28,146 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T21:45:28,147 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T21:45:28,147 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T21:45:28,148 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T21:45:28,149 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T21:45:28,149 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T21:45:28,150 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T21:45:28,151 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T21:45:28,153 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T21:45:28,154 DEBUG 
[master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T21:45:28,156 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T21:45:28,157 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T21:45:28,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T21:45:28,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T21:45:28,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:28,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:28,159 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=22a1f13b90b0,34009,1731361527947, sessionid=0x10030bfb4e60000, setting cluster-up flag (Was=false) 2024-11-11T21:45:28,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:28,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:28,165 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T21:45:28,166 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=22a1f13b90b0,34009,1731361527947 2024-11-11T21:45:28,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:28,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:28,172 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T21:45:28,173 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=22a1f13b90b0,34009,1731361527947 2024-11-11T21:45:28,175 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T21:45:28,176 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T21:45:28,177 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T21:45:28,177 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T21:45:28,177 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 22a1f13b90b0,34009,1731361527947 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T21:45:28,178 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:45:28,178 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:45:28,178 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:45:28,178 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:45:28,178 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/22a1f13b90b0:0, corePoolSize=10, maxPoolSize=10 2024-11-11T21:45:28,179 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:28,179 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=2, maxPoolSize=2 2024-11-11T21:45:28,179 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T21:45:28,181 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731361558181 2024-11-11T21:45:28,181 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T21:45:28,182 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T21:45:28,182 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T21:45:28,182 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T21:45:28,182 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T21:45:28,182 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T21:45:28,182 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:28,182 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T21:45:28,182 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T21:45:28,183 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T21:45:28,183 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T21:45:28,183 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:45:28,183 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T21:45:28,183 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T21:45:28,183 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361528183,5,FailOnTimeoutGroup] 2024-11-11T21:45:28,183 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361528183,5,FailOnTimeoutGroup] 2024-11-11T21:45:28,184 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:28,184 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T21:45:28,184 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:28,184 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:28,185 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:28,185 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T21:45:28,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741831_1007 (size=1321) 2024-11-11T21:45:28,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43827 is added to blk_1073741831_1007 (size=1321) 2024-11-11T21:45:28,197 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T21:45:28,197 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598 2024-11-11T21:45:28,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741832_1008 (size=32) 2024-11-11T21:45:28,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43827 is added to blk_1073741832_1008 (size=32) 2024-11-11T21:45:28,208 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:45:28,209 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T21:45:28,211 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T21:45:28,211 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:28,211 INFO [RS:0;22a1f13b90b0:46783 {}] regionserver.HRegionServer(746): ClusterId : 41c6095d-6ac6-4858-9ef7-4ed411959333 2024-11-11T21:45:28,211 DEBUG [RS:0;22a1f13b90b0:46783 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T21:45:28,212 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:45:28,212 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T21:45:28,213 DEBUG [RS:0;22a1f13b90b0:46783 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T21:45:28,213 DEBUG [RS:0;22a1f13b90b0:46783 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T21:45:28,214 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T21:45:28,214 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:28,214 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:45:28,215 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T21:45:28,215 DEBUG [RS:0;22a1f13b90b0:46783 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T21:45:28,216 DEBUG [RS:0;22a1f13b90b0:46783 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5610c897, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=22a1f13b90b0/172.17.0.2:0 2024-11-11T21:45:28,216 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T21:45:28,216 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:28,217 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:45:28,217 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T21:45:28,219 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T21:45:28,219 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:28,220 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:45:28,220 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T21:45:28,221 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/data/hbase/meta/1588230740 2024-11-11T21:45:28,221 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/data/hbase/meta/1588230740 2024-11-11T21:45:28,222 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T21:45:28,222 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T21:45:28,223 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
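For reference, the column-family settings printed in the hbase:meta descriptor above (VERSIONS 3, ROWCOL bloom filter, ROW_INDEX_V1 data block encoding, 8 KB blocks, in-memory) could be reproduced for an ordinary table with the public client API roughly as in the following sketch; the table name "demo" is illustrative, not something from this run.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
        public static TableDescriptor build() {
            // Mirrors the 'info' family settings shown in the descriptor above.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8192)
                .setInMemory(true)
                .build();
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo")) // hypothetical table name
                .setColumnFamily(info)
                .build();
        }
    }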
2024-11-11T21:45:28,225 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T21:45:28,227 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T21:45:28,228 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=871840, jitterRate=0.10860297083854675}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T21:45:28,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731361528208Initializing all the Stores at 1731361528209 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361528209Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361528209Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361528209Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361528209Cleaning up temporary data from old regions at 1731361528222 (+13 ms)Region opened successfully at 1731361528229 (+7 ms) 2024-11-11T21:45:28,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T21:45:28,229 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T21:45:28,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T21:45:28,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T21:45:28,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T21:45:28,229 DEBUG [RS:0;22a1f13b90b0:46783 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;22a1f13b90b0:46783 2024-11-11T21:45:28,229 INFO [RS:0;22a1f13b90b0:46783 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T21:45:28,229 INFO [RS:0;22a1f13b90b0:46783 {}] 
regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T21:45:28,229 DEBUG [RS:0;22a1f13b90b0:46783 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-11T21:45:28,230 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T21:45:28,230 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731361528229Disabling compacts and flushes for region at 1731361528229Disabling writes for close at 1731361528229Writing region close event to WAL at 1731361528230 (+1 ms)Closed at 1731361528230 2024-11-11T21:45:28,231 INFO [RS:0;22a1f13b90b0:46783 {}] regionserver.HRegionServer(2659): reportForDuty to master=22a1f13b90b0,34009,1731361527947 with port=46783, startcode=1731361527992 2024-11-11T21:45:28,231 DEBUG [RS:0;22a1f13b90b0:46783 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T21:45:28,231 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:45:28,232 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T21:45:28,232 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T21:45:28,234 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T21:45:28,235 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32933, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T21:45:28,235 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T21:45:28,236 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34009 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 22a1f13b90b0,46783,1731361527992 2024-11-11T21:45:28,236 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34009 {}] master.ServerManager(517): Registering regionserver=22a1f13b90b0,46783,1731361527992 2024-11-11T21:45:28,238 DEBUG [RS:0;22a1f13b90b0:46783 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598 2024-11-11T21:45:28,238 DEBUG [RS:0;22a1f13b90b0:46783 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36327 2024-11-11T21:45:28,238 DEBUG [RS:0;22a1f13b90b0:46783 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T21:45:28,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T21:45:28,240 DEBUG 
[RS:0;22a1f13b90b0:46783 {}] zookeeper.ZKUtil(111): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/22a1f13b90b0,46783,1731361527992 2024-11-11T21:45:28,240 WARN [RS:0;22a1f13b90b0:46783 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T21:45:28,240 INFO [RS:0;22a1f13b90b0:46783 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:45:28,240 DEBUG [RS:0;22a1f13b90b0:46783 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/WALs/22a1f13b90b0,46783,1731361527992 2024-11-11T21:45:28,241 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [22a1f13b90b0,46783,1731361527992] 2024-11-11T21:45:28,245 INFO [RS:0;22a1f13b90b0:46783 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T21:45:28,248 INFO [RS:0;22a1f13b90b0:46783 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T21:45:28,249 INFO [RS:0;22a1f13b90b0:46783 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T21:45:28,249 INFO [RS:0;22a1f13b90b0:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:28,249 INFO [RS:0;22a1f13b90b0:46783 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T21:45:28,250 INFO [RS:0;22a1f13b90b0:46783 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T21:45:28,250 INFO [RS:0;22a1f13b90b0:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
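The ephemeral node registered above under /hbase/rs is what the master's RegionServerTracker watches; the live regionserver list can be read back with a plain ZooKeeper client, as in this sketch, which assumes the quorum 127.0.0.1:52772 and base znode /hbase shown in this run.

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ListRegionServers {
        public static void main(String[] args) throws Exception {
            // Connect string and base znode taken from the log above; a real client
            // would read hbase.zookeeper.quorum and zookeeper.znode.parent instead.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:52772", 30_000, event -> { });
            List<String> servers = zk.getChildren("/hbase/rs", false);
            // Each child is named host,port,startcode, e.g.
            // 22a1f13b90b0,46783,1731361527992 in this run.
            servers.forEach(System.out::println);
            zk.close();
        }
    }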
2024-11-11T21:45:28,250 DEBUG [RS:0;22a1f13b90b0:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:28,250 DEBUG [RS:0;22a1f13b90b0:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:28,251 DEBUG [RS:0;22a1f13b90b0:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:28,251 DEBUG [RS:0;22a1f13b90b0:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:28,251 DEBUG [RS:0;22a1f13b90b0:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:28,251 DEBUG [RS:0;22a1f13b90b0:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/22a1f13b90b0:0, corePoolSize=2, maxPoolSize=2 2024-11-11T21:45:28,251 DEBUG [RS:0;22a1f13b90b0:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:28,251 DEBUG [RS:0;22a1f13b90b0:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:28,251 DEBUG [RS:0;22a1f13b90b0:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:28,251 DEBUG [RS:0;22a1f13b90b0:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:28,251 DEBUG [RS:0;22a1f13b90b0:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:28,251 DEBUG [RS:0;22a1f13b90b0:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:28,251 DEBUG [RS:0;22a1f13b90b0:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/22a1f13b90b0:0, corePoolSize=3, maxPoolSize=3 2024-11-11T21:45:28,251 DEBUG [RS:0;22a1f13b90b0:46783 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0, corePoolSize=3, maxPoolSize=3 2024-11-11T21:45:28,253 INFO [RS:0;22a1f13b90b0:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:28,253 INFO [RS:0;22a1f13b90b0:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:28,253 INFO [RS:0;22a1f13b90b0:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:28,253 INFO [RS:0;22a1f13b90b0:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
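The ScheduledChore entries above (CompactionChecker, MemstoreFlusherChore, ExecutorStatusChore, nonceCleaner) all follow the same internal pattern: a ChoreService runs a ScheduledChore at a fixed period. A minimal sketch of that pattern follows, using HBase-internal classes and an illustrative chore name and period.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
        // A do-nothing chore analogous to the checkers above; name and period are made up.
        static class HeartbeatChore extends ScheduledChore {
            HeartbeatChore(Stoppable stopper) {
                super("HeartbeatChore", stopper, 1000); // period in milliseconds
            }
            @Override
            protected void chore() {
                System.out.println("chore tick");
            }
        }

        public static void main(String[] args) {
            Stoppable stopper = new Stoppable() {
                private volatile boolean stopped;
                @Override public void stop(String why) { stopped = true; }
                @Override public boolean isStopped() { return stopped; }
            };
            new ChoreService("demo").scheduleChore(new HeartbeatChore(stopper));
        }
    }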
2024-11-11T21:45:28,253 INFO [RS:0;22a1f13b90b0:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:28,253 INFO [RS:0;22a1f13b90b0:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,46783,1731361527992-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T21:45:28,268 INFO [RS:0;22a1f13b90b0:46783 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T21:45:28,268 INFO [RS:0;22a1f13b90b0:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,46783,1731361527992-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:28,269 INFO [RS:0;22a1f13b90b0:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:28,269 INFO [RS:0;22a1f13b90b0:46783 {}] regionserver.Replication(171): 22a1f13b90b0,46783,1731361527992 started 2024-11-11T21:45:28,284 INFO [RS:0;22a1f13b90b0:46783 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:28,284 INFO [RS:0;22a1f13b90b0:46783 {}] regionserver.HRegionServer(1482): Serving as 22a1f13b90b0,46783,1731361527992, RpcServer on 22a1f13b90b0/172.17.0.2:46783, sessionid=0x10030bfb4e60001 2024-11-11T21:45:28,284 DEBUG [RS:0;22a1f13b90b0:46783 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T21:45:28,284 DEBUG [RS:0;22a1f13b90b0:46783 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 22a1f13b90b0,46783,1731361527992 2024-11-11T21:45:28,284 DEBUG [RS:0;22a1f13b90b0:46783 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '22a1f13b90b0,46783,1731361527992' 2024-11-11T21:45:28,284 DEBUG [RS:0;22a1f13b90b0:46783 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T21:45:28,285 DEBUG [RS:0;22a1f13b90b0:46783 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T21:45:28,285 DEBUG [RS:0;22a1f13b90b0:46783 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T21:45:28,285 DEBUG [RS:0;22a1f13b90b0:46783 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T21:45:28,285 DEBUG [RS:0;22a1f13b90b0:46783 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 22a1f13b90b0,46783,1731361527992 2024-11-11T21:45:28,285 DEBUG [RS:0;22a1f13b90b0:46783 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '22a1f13b90b0,46783,1731361527992' 2024-11-11T21:45:28,285 DEBUG [RS:0;22a1f13b90b0:46783 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T21:45:28,286 DEBUG [RS:0;22a1f13b90b0:46783 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T21:45:28,286 DEBUG [RS:0;22a1f13b90b0:46783 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T21:45:28,286 INFO [RS:0;22a1f13b90b0:46783 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T21:45:28,286 INFO [RS:0;22a1f13b90b0:46783 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-11T21:45:28,386 WARN [22a1f13b90b0:34009 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-11T21:45:28,391 INFO [RS:0;22a1f13b90b0:46783 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C46783%2C1731361527992, suffix=, logDir=hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/WALs/22a1f13b90b0,46783,1731361527992, archiveDir=hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/oldWALs, maxLogs=32 2024-11-11T21:45:28,396 INFO [RS:0;22a1f13b90b0:46783 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C46783%2C1731361527992.1731361528395 2024-11-11T21:45:28,404 INFO [RS:0;22a1f13b90b0:46783 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/WALs/22a1f13b90b0,46783,1731361527992/22a1f13b90b0%2C46783%2C1731361527992.1731361528395 2024-11-11T21:45:28,406 DEBUG [RS:0;22a1f13b90b0:46783 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43993:43993),(127.0.0.1/127.0.0.1:45857:45857)] 2024-11-11T21:45:28,636 DEBUG [22a1f13b90b0:34009 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T21:45:28,638 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=22a1f13b90b0,46783,1731361527992 2024-11-11T21:45:28,644 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 22a1f13b90b0,46783,1731361527992, state=OPENING 2024-11-11T21:45:28,647 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T21:45:28,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:28,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:28,651 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T21:45:28,652 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:45:28,652 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:45:28,652 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=22a1f13b90b0,46783,1731361527992}] 2024-11-11T21:45:28,807 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T21:45:28,813 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34213, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T21:45:28,821 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T21:45:28,822 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:45:28,824 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C46783%2C1731361527992.meta, suffix=.meta, logDir=hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/WALs/22a1f13b90b0,46783,1731361527992, archiveDir=hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/oldWALs, maxLogs=32 2024-11-11T21:45:28,827 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C46783%2C1731361527992.meta.1731361528827.meta 2024-11-11T21:45:28,834 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/WALs/22a1f13b90b0,46783,1731361527992/22a1f13b90b0%2C46783%2C1731361527992.meta.1731361528827.meta 2024-11-11T21:45:28,836 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43993:43993),(127.0.0.1/127.0.0.1:45857:45857)] 2024-11-11T21:45:28,837 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T21:45:28,838 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T21:45:28,838 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T21:45:28,838 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
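The coprocessor loaded above comes from the coprocessor$1 attribute of the hbase:meta descriptor; attaching the same endpoint to a user table is a one-line descriptor change, sketched below with an illustrative table name.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorSketch {
        public static TableDescriptor build() throws IOException {
            // Same endpoint class the log shows being loaded from the hbase:meta HTD.
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                .build();
        }
    }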
2024-11-11T21:45:28,838 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T21:45:28,838 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:45:28,838 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T21:45:28,838 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T21:45:28,840 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T21:45:28,841 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T21:45:28,841 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:28,842 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:45:28,842 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T21:45:28,843 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T21:45:28,843 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:28,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:45:28,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T21:45:28,845 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T21:45:28,845 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:28,845 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:45:28,846 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T21:45:28,847 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T21:45:28,847 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:28,847 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
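The CompactionConfiguration values printed for each store above (minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, minCompactSize 128 MB) correspond to the usual hbase.hstore.compaction.* settings; a sketch of setting them programmatically follows. The key names are quoted from memory and should be checked against the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        public static Configuration tuned() {
            Configuration conf = HBaseConfiguration.create();
            // Values mirror what CompactionConfiguration logs above.
            conf.setInt("hbase.hstore.compaction.min", 3);
            conf.setInt("hbase.hstore.compaction.max", 10);
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
            conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
            return conf;
        }
    }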
2024-11-11T21:45:28,847 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T21:45:28,848 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/data/hbase/meta/1588230740 2024-11-11T21:45:28,849 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/data/hbase/meta/1588230740 2024-11-11T21:45:28,851 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T21:45:28,851 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T21:45:28,851 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T21:45:28,853 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T21:45:28,853 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=756544, jitterRate=-0.038005754351615906}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T21:45:28,854 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T21:45:28,854 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731361528839Writing region info on filesystem at 1731361528839Initializing all the Stores at 1731361528840 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361528840Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361528840Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361528840Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361528840Cleaning up temporary data from old regions at 1731361528851 (+11 ms)Running coprocessor post-open hooks at 1731361528854 (+3 ms)Region opened successfully at 1731361528854 2024-11-11T21:45:28,856 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731361528806 2024-11-11T21:45:28,858 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T21:45:28,858 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T21:45:28,859 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=22a1f13b90b0,46783,1731361527992 2024-11-11T21:45:28,861 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 22a1f13b90b0,46783,1731361527992, state=OPEN 2024-11-11T21:45:28,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T21:45:28,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T21:45:28,863 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=22a1f13b90b0,46783,1731361527992 2024-11-11T21:45:28,863 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:45:28,863 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:45:28,866 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T21:45:28,866 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=22a1f13b90b0,46783,1731361527992 in 211 msec 2024-11-11T21:45:28,869 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T21:45:28,869 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 634 msec 2024-11-11T21:45:28,870 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:45:28,870 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T21:45:28,872 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T21:45:28,872 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=22a1f13b90b0,46783,1731361527992, seqNum=-1] 2024-11-11T21:45:28,873 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T21:45:28,874 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49505, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T21:45:28,882 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 704 msec 2024-11-11T21:45:28,882 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731361528882, completionTime=-1 2024-11-11T21:45:28,882 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T21:45:28,882 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-11T21:45:28,884 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T21:45:28,884 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731361588884 2024-11-11T21:45:28,884 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731361648884 2024-11-11T21:45:28,884 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-11T21:45:28,885 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,34009,1731361527947-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:28,885 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,34009,1731361527947-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:28,885 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,34009,1731361527947-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:28,885 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-22a1f13b90b0:34009, period=300000, unit=MILLISECONDS is enabled. 
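InitMetaProcedure creates the 'default' and 'hbase' namespaces itself during bootstrap, as logged above; an additional user namespace would be created through Admin, roughly as in this sketch (the namespace name is made up).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // "demo_ns" is illustrative; 'default' and 'hbase' already exist.
                admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
            }
        }
    }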
2024-11-11T21:45:28,885 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:28,885 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:28,888 DEBUG [master/22a1f13b90b0:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T21:45:28,890 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.863sec 2024-11-11T21:45:28,891 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T21:45:28,891 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T21:45:28,891 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T21:45:28,891 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T21:45:28,891 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T21:45:28,891 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,34009,1731361527947-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T21:45:28,891 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,34009,1731361527947-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T21:45:28,894 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T21:45:28,894 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T21:45:28,894 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,34009,1731361527947-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
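The "Chore ScheduledChore name=..., period=..., unit=... is enabled." lines above are the master registering its periodic background tasks with HBase's ChoreService. As an illustrative sketch only (the chore class, its name and its period below are assumptions, not anything registered in this run), a chore is normally defined and scheduled like this:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    // Hypothetical chore that runs every 60 seconds until the Stoppable says stop.
    final class HeartbeatChore extends ScheduledChore {
      HeartbeatChore(Stoppable stopper) {
        super("HeartbeatChore", stopper, 60_000); // name, stopper, period (milliseconds by default)
      }

      @Override
      protected void chore() {
        System.out.println("periodic work goes here");
      }
    }

    // Scheduling the chore is what produces an "is enabled" line like the ones above:
    //   ChoreService choreService = new ChoreService("demo");
    //   choreService.scheduleChore(new HeartbeatChore(stopper));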
2024-11-11T21:45:28,912 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b5e52bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:45:28,912 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 22a1f13b90b0,34009,-1 for getting cluster id 2024-11-11T21:45:28,912 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T21:45:28,914 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '41c6095d-6ac6-4858-9ef7-4ed411959333' 2024-11-11T21:45:28,915 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T21:45:28,915 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "41c6095d-6ac6-4858-9ef7-4ed411959333" 2024-11-11T21:45:28,915 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f852b64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:45:28,915 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [22a1f13b90b0,34009,-1] 2024-11-11T21:45:28,916 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T21:45:28,916 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:45:28,918 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58752, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T21:45:28,919 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@781d38e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:45:28,919 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T21:45:28,921 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=22a1f13b90b0,46783,1731361527992, seqNum=-1] 2024-11-11T21:45:28,921 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T21:45:28,923 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51134, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T21:45:28,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=22a1f13b90b0,34009,1731361527947 2024-11-11T21:45:28,926 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:45:28,931 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T21:45:28,931 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T21:45:28,931 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T21:45:28,932 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:45:28,932 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:45:28,932 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:45:28,932 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-11T21:45:28,932 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T21:45:28,932 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1469549761, stopped=false 2024-11-11T21:45:28,932 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=22a1f13b90b0,34009,1731361527947 2024-11-11T21:45:28,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T21:45:28,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T21:45:28,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:28,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:28,934 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T21:45:28,934 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
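The "Connection has been closed by Time-limited test" messages and the call stacks around them are the test closing its AsyncConnection as part of minicluster shutdown. A minimal client-side sketch of that create/close lifecycle, assuming only the public ConnectionFactory API and an hbase-site.xml on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AsyncConnectionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // createAsyncConnection returns a CompletableFuture; get() blocks until the cluster id has
        // been fetched from the connection registry (the ClusterIdFetcher lines earlier in this log).
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
          // conn.getTable(...) / conn.getAdmin() would be used here
        } // close() is what emits the "Connection has been closed by ..." line
      }
    }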
2024-11-11T21:45:28,934 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:45:28,934 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:45:28,934 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:45:28,934 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:45:28,934 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '22a1f13b90b0,46783,1731361527992' ***** 2024-11-11T21:45:28,935 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T21:45:28,935 INFO [RS:0;22a1f13b90b0:46783 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T21:45:28,935 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T21:45:28,935 INFO [RS:0;22a1f13b90b0:46783 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T21:45:28,935 INFO [RS:0;22a1f13b90b0:46783 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T21:45:28,935 INFO [RS:0;22a1f13b90b0:46783 {}] regionserver.HRegionServer(959): stopping server 22a1f13b90b0,46783,1731361527992 2024-11-11T21:45:28,935 INFO [RS:0;22a1f13b90b0:46783 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T21:45:28,935 INFO [RS:0;22a1f13b90b0:46783 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;22a1f13b90b0:46783. 2024-11-11T21:45:28,935 DEBUG [RS:0;22a1f13b90b0:46783 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:45:28,935 DEBUG [RS:0;22a1f13b90b0:46783 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:45:28,936 INFO [RS:0;22a1f13b90b0:46783 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
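The call stacks above all bottom out in AbstractTestLogRolling.tearDown, which calls HBaseTestingUtil.shutdownMiniCluster to close the shared connection and then stop the HBase cluster, DFS and ZooKeeper. A condensed, hypothetical sketch of that test lifecycle (the field and setup method are assumptions; the real classes are the ones named in the traces):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        testUtil.startMiniCluster(); // brings up ZooKeeper, HDFS and HBase as in the startup log
      }

      @After
      public void tearDown() throws Exception {
        testUtil.shutdownMiniCluster(); // the call visible in the stack traces above
      }
    }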
2024-11-11T21:45:28,936 INFO [RS:0;22a1f13b90b0:46783 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T21:45:28,936 INFO [RS:0;22a1f13b90b0:46783 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T21:45:28,936 INFO [RS:0;22a1f13b90b0:46783 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T21:45:28,936 INFO [RS:0;22a1f13b90b0:46783 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-11T21:45:28,936 DEBUG [RS:0;22a1f13b90b0:46783 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-11T21:45:28,936 DEBUG [RS:0;22a1f13b90b0:46783 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T21:45:28,936 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T21:45:28,936 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T21:45:28,936 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T21:45:28,936 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T21:45:28,936 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T21:45:28,937 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-11T21:45:28,955 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/data/hbase/meta/1588230740/.tmp/ns/1cc39e365edb4b31ae93931612c35e3c is 43, key is default/ns:d/1731361528875/Put/seqid=0 2024-11-11T21:45:28,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43827 is added to blk_1073741835_1011 (size=5153) 2024-11-11T21:45:28,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741835_1011 (size=5153) 2024-11-11T21:45:28,962 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/data/hbase/meta/1588230740/.tmp/ns/1cc39e365edb4b31ae93931612c35e3c 2024-11-11T21:45:28,972 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/data/hbase/meta/1588230740/.tmp/ns/1cc39e365edb4b31ae93931612c35e3c as hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/data/hbase/meta/1588230740/ns/1cc39e365edb4b31ae93931612c35e3c 2024-11-11T21:45:28,980 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/data/hbase/meta/1588230740/ns/1cc39e365edb4b31ae93931612c35e3c, entries=2, sequenceid=6, filesize=5.0 K 2024-11-11T21:45:28,981 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 44ms, sequenceid=6, compaction requested=false 2024-11-11T21:45:28,987 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-11T21:45:28,988 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T21:45:28,988 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T21:45:28,988 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731361528936Running coprocessor pre-close hooks at 1731361528936Disabling compacts and flushes for region at 1731361528936Disabling writes for close at 1731361528936Obtaining lock to block concurrent updates at 1731361528937 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731361528937Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731361528937Flushing stores of hbase:meta,,1.1588230740 at 1731361528938 (+1 ms)Flushing 1588230740/ns: creating writer at 1731361528938Flushing 1588230740/ns: appending metadata at 1731361528954 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731361528954Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a0f8008: reopening flushed file at 1731361528971 (+17 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 44ms, sequenceid=6, compaction requested=false at 1731361528981 (+10 ms)Writing region close event to WAL at 1731361528982 (+1 ms)Running coprocessor post-close hooks at 1731361528988 (+6 ms)Closed at 1731361528988 2024-11-11T21:45:28,989 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T21:45:29,136 INFO [RS:0;22a1f13b90b0:46783 {}] regionserver.HRegionServer(976): stopping server 22a1f13b90b0,46783,1731361527992; all regions closed. 
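The flush journal above records the close-time flush of hbase:meta's 'ns' family: roughly 74 B of memstore data written out as a ~5.0 K HFile at sequenceid=6 in 44 ms. The same kind of memstore flush can also be requested explicitly through the Admin API; a minimal sketch, with the connection setup assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Writes the table's memstores out as HFiles, producing
          // "Flushed memstore data size=..." lines like the ones above.
          admin.flush(TableName.valueOf("hbase:meta"));
        }
      }
    }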
2024-11-11T21:45:29,137 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:29,137 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:29,137 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:29,138 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:29,138 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:29,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43827 is added to blk_1073741834_1010 (size=1152) 2024-11-11T21:45:29,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741834_1010 (size=1152) 2024-11-11T21:45:29,143 DEBUG [RS:0;22a1f13b90b0:46783 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/oldWALs 2024-11-11T21:45:29,143 INFO [RS:0;22a1f13b90b0:46783 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 22a1f13b90b0%2C46783%2C1731361527992.meta:.meta(num 1731361528827) 2024-11-11T21:45:29,144 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:29,144 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:29,144 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:29,144 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:29,144 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:29,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43827 is added to blk_1073741833_1009 (size=93) 2024-11-11T21:45:29,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741833_1009 (size=93) 2024-11-11T21:45:29,149 DEBUG [RS:0;22a1f13b90b0:46783 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/oldWALs 2024-11-11T21:45:29,149 INFO [RS:0;22a1f13b90b0:46783 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 22a1f13b90b0%2C46783%2C1731361527992:(num 1731361528395) 2024-11-11T21:45:29,149 DEBUG [RS:0;22a1f13b90b0:46783 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:45:29,149 INFO [RS:0;22a1f13b90b0:46783 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T21:45:29,150 INFO [RS:0;22a1f13b90b0:46783 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T21:45:29,150 INFO [RS:0;22a1f13b90b0:46783 {}] hbase.ChoreService(370): Chore service for: regionserver/22a1f13b90b0:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T21:45:29,150 INFO [RS:0;22a1f13b90b0:46783 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T21:45:29,150 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
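The "Moved 1 WAL file(s) to .../oldWALs" and "Closed WAL: FSHLog ..." lines are the final WAL roll and close during regionserver shutdown, the same mechanism this TestLogRolling test exercises. As an illustrative sketch only (the server name below is simply copied from this log), a WAL roll can be requested for a running regionserver via the Admin API:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WalRollSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // host,port,startcode form, as used throughout this log
          ServerName rs = ServerName.valueOf("22a1f13b90b0,46783,1731361527992");
          admin.rollWALWriter(rs); // asks the regionserver to close its current WAL and start a new one
        }
      }
    }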
2024-11-11T21:45:29,150 INFO [RS:0;22a1f13b90b0:46783 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46783 2024-11-11T21:45:29,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/22a1f13b90b0,46783,1731361527992 2024-11-11T21:45:29,152 INFO [RS:0;22a1f13b90b0:46783 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T21:45:29,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T21:45:29,152 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [22a1f13b90b0,46783,1731361527992] 2024-11-11T21:45:29,153 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/22a1f13b90b0,46783,1731361527992 already deleted, retry=false 2024-11-11T21:45:29,153 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 22a1f13b90b0,46783,1731361527992 expired; onlineServers=0 2024-11-11T21:45:29,153 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '22a1f13b90b0,34009,1731361527947' ***** 2024-11-11T21:45:29,154 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T21:45:29,154 INFO [M:0;22a1f13b90b0:34009 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T21:45:29,154 INFO [M:0;22a1f13b90b0:34009 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T21:45:29,154 DEBUG [M:0;22a1f13b90b0:34009 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T21:45:29,154 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
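The RegionServerTracker lines above react to the deletion of the regionserver's ephemeral znode under /hbase/rs. A generic, hypothetical sketch of that watch pattern using the plain Apache ZooKeeper client (the quorum string and znode path are taken from this log; everything else is an assumption, and the real tracker uses HBase's ZKWatcher instead):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RsZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:52772", 30_000, event -> { });
        String znode = "/hbase/rs/22a1f13b90b0,46783,1731361527992";
        // exists() registers a one-shot watch that fires when the ephemeral node appears or disappears,
        // which is what drives the "RegionServer ephemeral node deleted, processing expiration" handling.
        zk.exists(znode, (WatchedEvent event) -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
            System.out.println("regionserver znode deleted: " + event.getPath());
          }
        });
        Thread.sleep(60_000); // keep the session alive long enough to observe the event in this sketch
        zk.close();
      }
    }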
2024-11-11T21:45:29,154 DEBUG [M:0;22a1f13b90b0:34009 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T21:45:29,154 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361528183 {}] cleaner.HFileCleaner(306): Exit Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361528183,5,FailOnTimeoutGroup] 2024-11-11T21:45:29,154 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361528183 {}] cleaner.HFileCleaner(306): Exit Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361528183,5,FailOnTimeoutGroup] 2024-11-11T21:45:29,154 INFO [M:0;22a1f13b90b0:34009 {}] hbase.ChoreService(370): Chore service for: master/22a1f13b90b0:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T21:45:29,154 INFO [M:0;22a1f13b90b0:34009 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T21:45:29,154 DEBUG [M:0;22a1f13b90b0:34009 {}] master.HMaster(1795): Stopping service threads 2024-11-11T21:45:29,154 INFO [M:0;22a1f13b90b0:34009 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T21:45:29,154 INFO [M:0;22a1f13b90b0:34009 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T21:45:29,155 INFO [M:0;22a1f13b90b0:34009 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T21:45:29,155 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T21:45:29,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T21:45:29,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:29,155 DEBUG [M:0;22a1f13b90b0:34009 {}] zookeeper.ZKUtil(347): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T21:45:29,155 WARN [M:0;22a1f13b90b0:34009 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T21:45:29,156 INFO [M:0;22a1f13b90b0:34009 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/.lastflushedseqids 2024-11-11T21:45:29,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741836_1012 (size=99) 2024-11-11T21:45:29,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43827 is added to blk_1073741836_1012 (size=99) 2024-11-11T21:45:29,162 INFO [M:0;22a1f13b90b0:34009 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T21:45:29,163 INFO [M:0;22a1f13b90b0:34009 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T21:45:29,163 DEBUG [M:0;22a1f13b90b0:34009 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T21:45:29,163 INFO [M:0;22a1f13b90b0:34009 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:45:29,163 DEBUG [M:0;22a1f13b90b0:34009 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:45:29,163 DEBUG [M:0;22a1f13b90b0:34009 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T21:45:29,163 DEBUG [M:0;22a1f13b90b0:34009 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:45:29,163 INFO [M:0;22a1f13b90b0:34009 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-11T21:45:29,181 DEBUG [M:0;22a1f13b90b0:34009 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/39bf5f8289104246a0434db20ae2f9f8 is 82, key is hbase:meta,,1/info:regioninfo/1731361528859/Put/seqid=0 2024-11-11T21:45:29,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741837_1013 (size=5672) 2024-11-11T21:45:29,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43827 is added to blk_1073741837_1013 (size=5672) 2024-11-11T21:45:29,188 INFO [M:0;22a1f13b90b0:34009 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/39bf5f8289104246a0434db20ae2f9f8 2024-11-11T21:45:29,209 DEBUG [M:0;22a1f13b90b0:34009 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/98eb1614746d4bfb83db066083e0dcb0 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731361528881/Put/seqid=0 2024-11-11T21:45:29,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43827 is added to blk_1073741838_1014 (size=5275) 2024-11-11T21:45:29,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741838_1014 (size=5275) 2024-11-11T21:45:29,215 INFO [M:0;22a1f13b90b0:34009 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/98eb1614746d4bfb83db066083e0dcb0 2024-11-11T21:45:29,237 DEBUG [M:0;22a1f13b90b0:34009 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/195c149617e94e9bba72f1849023d398 is 69, key is 22a1f13b90b0,46783,1731361527992/rs:state/1731361528236/Put/seqid=0 2024-11-11T21:45:29,242 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43827 is added to blk_1073741839_1015 (size=5156) 2024-11-11T21:45:29,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741839_1015 (size=5156) 2024-11-11T21:45:29,242 INFO [M:0;22a1f13b90b0:34009 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/195c149617e94e9bba72f1849023d398 2024-11-11T21:45:29,253 INFO [RS:0;22a1f13b90b0:46783 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T21:45:29,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:45:29,253 INFO [RS:0;22a1f13b90b0:46783 {}] regionserver.HRegionServer(1031): Exiting; stopping=22a1f13b90b0,46783,1731361527992; zookeeper connection closed. 2024-11-11T21:45:29,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46783-0x10030bfb4e60001, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:45:29,253 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@594d302c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@594d302c 2024-11-11T21:45:29,253 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T21:45:29,266 DEBUG [M:0;22a1f13b90b0:34009 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c282c31fa53c4014aa2428cc65ab1b84 is 52, key is load_balancer_on/state:d/1731361528929/Put/seqid=0 2024-11-11T21:45:29,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43827 is added to blk_1073741840_1016 (size=5056) 2024-11-11T21:45:29,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741840_1016 (size=5056) 2024-11-11T21:45:29,273 INFO [M:0;22a1f13b90b0:34009 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c282c31fa53c4014aa2428cc65ab1b84 2024-11-11T21:45:29,281 DEBUG [M:0;22a1f13b90b0:34009 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/39bf5f8289104246a0434db20ae2f9f8 as hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/39bf5f8289104246a0434db20ae2f9f8 2024-11-11T21:45:29,287 INFO [M:0;22a1f13b90b0:34009 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/39bf5f8289104246a0434db20ae2f9f8, entries=8, sequenceid=29, filesize=5.5 K 2024-11-11T21:45:29,289 DEBUG [M:0;22a1f13b90b0:34009 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/98eb1614746d4bfb83db066083e0dcb0 as hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/98eb1614746d4bfb83db066083e0dcb0 2024-11-11T21:45:29,295 INFO [M:0;22a1f13b90b0:34009 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/98eb1614746d4bfb83db066083e0dcb0, entries=3, sequenceid=29, filesize=5.2 K 2024-11-11T21:45:29,297 DEBUG [M:0;22a1f13b90b0:34009 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/195c149617e94e9bba72f1849023d398 as hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/195c149617e94e9bba72f1849023d398 2024-11-11T21:45:29,303 INFO [M:0;22a1f13b90b0:34009 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/195c149617e94e9bba72f1849023d398, entries=1, sequenceid=29, filesize=5.0 K 2024-11-11T21:45:29,304 DEBUG [M:0;22a1f13b90b0:34009 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c282c31fa53c4014aa2428cc65ab1b84 as hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c282c31fa53c4014aa2428cc65ab1b84 2024-11-11T21:45:29,311 INFO [M:0;22a1f13b90b0:34009 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36327/user/jenkins/test-data/55740778-ae88-e343-9e2b-9454345cb598/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c282c31fa53c4014aa2428cc65ab1b84, entries=1, sequenceid=29, filesize=4.9 K 2024-11-11T21:45:29,312 INFO [M:0;22a1f13b90b0:34009 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=29, compaction requested=false 2024-11-11T21:45:29,314 INFO [M:0;22a1f13b90b0:34009 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
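The "Committing .../.tmp/... as .../<family>/..." lines above show the flush commit step: each HFile is written fully under the region's .tmp directory and then moved into the column-family directory so readers never see a partial file. A generic, hypothetical sketch of that write-then-rename pattern with the Hadoop FileSystem API (the shortened paths below are placeholders for the long hdfs://localhost:36327/... paths in the log):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpCommitSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Placeholder paths; the file name is the store file committed above.
        Path tmpFile = new Path("/region/.tmp/info/39bf5f8289104246a0434db20ae2f9f8");
        Path finalFile = new Path("/region/info/39bf5f8289104246a0434db20ae2f9f8");
        // The file is written completely into .tmp first; the rename publishes it in one step.
        if (!fs.rename(tmpFile, finalFile)) {
          throw new IOException("commit failed: " + tmpFile + " -> " + finalFile);
        }
      }
    }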
2024-11-11T21:45:29,314 DEBUG [M:0;22a1f13b90b0:34009 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731361529163Disabling compacts and flushes for region at 1731361529163Disabling writes for close at 1731361529163Obtaining lock to block concurrent updates at 1731361529163Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731361529163Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731361529164 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731361529164Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731361529164Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731361529181 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731361529181Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731361529194 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731361529208 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731361529209 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731361529221 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731361529236 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731361529236Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731361529249 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731361529266 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731361529266Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5b63b0d2: reopening flushed file at 1731361529279 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2108ffcd: reopening flushed file at 1731361529288 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16e65a98: reopening flushed file at 1731361529295 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@469b0557: reopening flushed file at 1731361529303 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=29, compaction requested=false at 1731361529312 (+9 ms)Writing region close event to WAL at 1731361529314 (+2 ms)Closed at 1731361529314 2024-11-11T21:45:29,314 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:29,314 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:29,315 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:29,315 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:29,315 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:29,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43827 is added to blk_1073741830_1006 (size=10311) 2024-11-11T21:45:29,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741830_1006 (size=10311) 2024-11-11T21:45:29,318 INFO [M:0;22a1f13b90b0:34009 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-11T21:45:29,318 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T21:45:29,318 INFO [M:0;22a1f13b90b0:34009 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34009 2024-11-11T21:45:29,318 INFO [M:0;22a1f13b90b0:34009 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T21:45:29,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:45:29,420 INFO [M:0;22a1f13b90b0:34009 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T21:45:29,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34009-0x10030bfb4e60000, quorum=127.0.0.1:52772, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:45:29,425 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2dd9bfa6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:45:29,426 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2b44e274{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:45:29,426 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:45:29,427 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f11c100{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:45:29,427 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58319158{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/hadoop.log.dir/,STOPPED} 2024-11-11T21:45:29,430 WARN [BP-1834037961-172.17.0.2-1731361527338 heartbeating to localhost/127.0.0.1:36327 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T21:45:29,430 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T21:45:29,430 WARN [BP-1834037961-172.17.0.2-1731361527338 heartbeating to localhost/127.0.0.1:36327 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1834037961-172.17.0.2-1731361527338 (Datanode Uuid 908b6a07-04fc-495d-91a5-b262c44fe147) service to localhost/127.0.0.1:36327 2024-11-11T21:45:29,430 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T21:45:29,431 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/cluster_193d385d-7517-ce77-37b1-d7cb7ec60e54/data/data3/current/BP-1834037961-172.17.0.2-1731361527338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:45:29,431 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/cluster_193d385d-7517-ce77-37b1-d7cb7ec60e54/data/data4/current/BP-1834037961-172.17.0.2-1731361527338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:45:29,431 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T21:45:29,433 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@737d6115{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:45:29,434 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4edee9ab{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:45:29,434 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:45:29,434 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48bb784e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:45:29,434 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@31d619c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/hadoop.log.dir/,STOPPED} 2024-11-11T21:45:29,435 WARN [BP-1834037961-172.17.0.2-1731361527338 heartbeating to localhost/127.0.0.1:36327 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T21:45:29,435 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T21:45:29,435 WARN [BP-1834037961-172.17.0.2-1731361527338 heartbeating to localhost/127.0.0.1:36327 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1834037961-172.17.0.2-1731361527338 (Datanode Uuid 6a149f9f-1c2d-4066-9822-a2b03126f336) service to localhost/127.0.0.1:36327 2024-11-11T21:45:29,435 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T21:45:29,436 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/cluster_193d385d-7517-ce77-37b1-d7cb7ec60e54/data/data1/current/BP-1834037961-172.17.0.2-1731361527338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:45:29,436 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/cluster_193d385d-7517-ce77-37b1-d7cb7ec60e54/data/data2/current/BP-1834037961-172.17.0.2-1731361527338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:45:29,437 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T21:45:29,442 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@521ea93{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T21:45:29,443 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3197ca45{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:45:29,443 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:45:29,443 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e979f8d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:45:29,443 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c83318b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/hadoop.log.dir/,STOPPED} 2024-11-11T21:45:29,449 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T21:45:29,465 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T21:45:29,465 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T21:45:29,465 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/hadoop.log.dir so I do NOT create it in target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d 2024-11-11T21:45:29,465 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a479fad4-547c-557b-5436-0058b35c9ee0/hadoop.tmp.dir so I do NOT create it in target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d 2024-11-11T21:45:29,465 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902, deleteOnExit=true 2024-11-11T21:45:29,465 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T21:45:29,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/test.cache.data in system properties and HBase conf 2024-11-11T21:45:29,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T21:45:29,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/hadoop.log.dir in system properties and HBase conf 2024-11-11T21:45:29,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T21:45:29,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T21:45:29,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T21:45:29,466 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-11T21:45:29,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T21:45:29,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T21:45:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T21:45:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T21:45:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T21:45:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T21:45:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T21:45:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T21:45:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T21:45:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/nfs.dump.dir in system properties and HBase conf 2024-11-11T21:45:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/java.io.tmpdir in system properties and HBase conf 2024-11-11T21:45:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T21:45:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T21:45:29,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T21:45:29,480 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T21:45:29,529 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:45:29,534 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:45:29,537 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:45:29,537 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:45:29,538 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T21:45:29,538 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:45:29,541 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@71ee126f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:45:29,541 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e7d5546{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:45:29,637 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@477187fb{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/java.io.tmpdir/jetty-localhost-35743-hadoop-hdfs-3_4_1-tests_jar-_-any-8397310971716247367/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T21:45:29,638 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6a488aac{HTTP/1.1, (http/1.1)}{localhost:35743} 2024-11-11T21:45:29,638 INFO [Time-limited test {}] server.Server(415): Started @104037ms 2024-11-11T21:45:29,650 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T21:45:29,702 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:45:29,705 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:45:29,708 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:45:29,708 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:45:29,708 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T21:45:29,708 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@764b5008{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:45:29,709 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7183cb8b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:45:29,803 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5493a194{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/java.io.tmpdir/jetty-localhost-35781-hadoop-hdfs-3_4_1-tests_jar-_-any-14803321267573420785/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:45:29,804 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@257cf4bb{HTTP/1.1, (http/1.1)}{localhost:35781} 2024-11-11T21:45:29,804 INFO [Time-limited test {}] server.Server(415): Started @104203ms 2024-11-11T21:45:29,805 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T21:45:29,832 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:45:29,835 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:45:29,836 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:45:29,836 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:45:29,836 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T21:45:29,837 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2606b08f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:45:29,837 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7982676d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:45:29,866 WARN [Thread-655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data1/current/BP-759734978-172.17.0.2-1731361529491/current, will proceed with Du for space computation calculation, 2024-11-11T21:45:29,866 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data2/current/BP-759734978-172.17.0.2-1731361529491/current, will proceed with Du for space computation calculation, 2024-11-11T21:45:29,885 WARN [Thread-634 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T21:45:29,893 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdfada0ddd7e70c4 with lease ID 0xaf8e50ebdfce26f8: Processing first storage report for DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8 from datanode DatanodeRegistration(127.0.0.1:39385, datanodeUuid=231bda69-9879-4159-b0fd-52114e939ab8, infoPort=45383, infoSecurePort=0, ipcPort=42557, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491) 2024-11-11T21:45:29,893 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdfada0ddd7e70c4 with lease ID 0xaf8e50ebdfce26f8: from storage DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8 node DatanodeRegistration(127.0.0.1:39385, datanodeUuid=231bda69-9879-4159-b0fd-52114e939ab8, infoPort=45383, infoSecurePort=0, ipcPort=42557, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:45:29,893 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdfada0ddd7e70c4 with lease ID 0xaf8e50ebdfce26f8: Processing first storage report for DS-900fb593-4933-4125-887c-89d3881eac3f from datanode DatanodeRegistration(127.0.0.1:39385, datanodeUuid=231bda69-9879-4159-b0fd-52114e939ab8, infoPort=45383, infoSecurePort=0, ipcPort=42557, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491) 2024-11-11T21:45:29,893 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdfada0ddd7e70c4 with lease ID 0xaf8e50ebdfce26f8: from storage DS-900fb593-4933-4125-887c-89d3881eac3f node DatanodeRegistration(127.0.0.1:39385, datanodeUuid=231bda69-9879-4159-b0fd-52114e939ab8, infoPort=45383, infoSecurePort=0, ipcPort=42557, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:45:29,940 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7a9b2a9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/java.io.tmpdir/jetty-localhost-45081-hadoop-hdfs-3_4_1-tests_jar-_-any-11095992396839041176/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:45:29,940 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@69bbaec1{HTTP/1.1, (http/1.1)}{localhost:45081} 2024-11-11T21:45:29,940 INFO [Time-limited test {}] server.Server(415): Started @104339ms 2024-11-11T21:45:29,942 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
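The entries above record HBaseTestingUtil shutting down one minicluster and bringing up a fresh one with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}, which in turn starts the MiniDFS datanodes and Jetty endpoints logged here. A minimal, illustrative sketch of a test driving that same startup/shutdown sequence follows; it assumes the StartMiniClusterOption builder exposes setters matching the field names printed in the log, and the class name is hypothetical.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

/**
 * Hypothetical driver mirroring the startup the log records:
 * 1 master, 1 region server, 2 datanodes, 1 ZK server.
 */
public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    // Starts DFS, the MiniZooKeeperCluster and the HBase master/region server,
    // producing DataNode/Jetty/ZooKeeper entries like the ones above.
    util.startMiniCluster(option);
    try {
      // ... test logic against the running minicluster would go here ...
    } finally {
      // Tears everything down again ("Minicluster is down" in the log above).
      util.shutdownMiniCluster();
    }
  }
}
```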
2024-11-11T21:45:30,009 WARN [Thread-681 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data3/current/BP-759734978-172.17.0.2-1731361529491/current, will proceed with Du for space computation calculation, 2024-11-11T21:45:30,010 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data4/current/BP-759734978-172.17.0.2-1731361529491/current, will proceed with Du for space computation calculation, 2024-11-11T21:45:30,027 WARN [Thread-670 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T21:45:30,030 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb59434a27c15f74b with lease ID 0xaf8e50ebdfce26f9: Processing first storage report for DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf from datanode DatanodeRegistration(127.0.0.1:33285, datanodeUuid=8595ef5f-b595-4e43-8673-b19b1a241670, infoPort=35507, infoSecurePort=0, ipcPort=38569, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491) 2024-11-11T21:45:30,030 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb59434a27c15f74b with lease ID 0xaf8e50ebdfce26f9: from storage DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf node DatanodeRegistration(127.0.0.1:33285, datanodeUuid=8595ef5f-b595-4e43-8673-b19b1a241670, infoPort=35507, infoSecurePort=0, ipcPort=38569, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:45:30,030 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb59434a27c15f74b with lease ID 0xaf8e50ebdfce26f9: Processing first storage report for DS-82714e5a-99b8-4b1a-84ff-85a0cf488710 from datanode DatanodeRegistration(127.0.0.1:33285, datanodeUuid=8595ef5f-b595-4e43-8673-b19b1a241670, infoPort=35507, infoSecurePort=0, ipcPort=38569, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491) 2024-11-11T21:45:30,030 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb59434a27c15f74b with lease ID 0xaf8e50ebdfce26f9: from storage DS-82714e5a-99b8-4b1a-84ff-85a0cf488710 node DatanodeRegistration(127.0.0.1:33285, datanodeUuid=8595ef5f-b595-4e43-8673-b19b1a241670, infoPort=35507, infoSecurePort=0, ipcPort=38569, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:45:30,073 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d 2024-11-11T21:45:30,077 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/zookeeper_0, clientPort=64033, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T21:45:30,078 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64033 2024-11-11T21:45:30,079 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:45:30,081 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:45:30,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39385 is added to blk_1073741825_1001 (size=7) 2024-11-11T21:45:30,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741825_1001 (size=7) 2024-11-11T21:45:30,094 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4 with version=8 2024-11-11T21:45:30,094 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/hbase-staging 2024-11-11T21:45:30,096 INFO [Time-limited test {}] client.ConnectionUtils(128): master/22a1f13b90b0:0 server-side Connection retries=45 2024-11-11T21:45:30,096 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:45:30,096 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T21:45:30,096 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T21:45:30,096 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:45:30,096 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T21:45:30,096 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T21:45:30,096 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T21:45:30,097 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42845 2024-11-11T21:45:30,099 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42845 connecting to ZooKeeper ensemble=127.0.0.1:64033 2024-11-11T21:45:30,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:428450x0, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T21:45:30,103 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42845-0x10030bfbd4a0000 connected 2024-11-11T21:45:30,119 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:45:30,121 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:45:30,123 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:45:30,123 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4, hbase.cluster.distributed=false 2024-11-11T21:45:30,125 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T21:45:30,129 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42845 2024-11-11T21:45:30,130 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42845 2024-11-11T21:45:30,133 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42845 2024-11-11T21:45:30,133 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42845 2024-11-11T21:45:30,134 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42845 2024-11-11T21:45:30,149 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/22a1f13b90b0:0 server-side Connection retries=45 2024-11-11T21:45:30,149 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:45:30,149 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T21:45:30,149 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T21:45:30,149 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:45:30,150 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T21:45:30,150 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T21:45:30,150 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T21:45:30,150 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33181 2024-11-11T21:45:30,152 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33181 connecting to ZooKeeper ensemble=127.0.0.1:64033 2024-11-11T21:45:30,152 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:45:30,154 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:45:30,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:331810x0, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T21:45:30,158 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:331810x0, quorum=127.0.0.1:64033, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:45:30,158 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33181-0x10030bfbd4a0001 connected 2024-11-11T21:45:30,158 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T21:45:30,161 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T21:45:30,162 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33181-0x10030bfbd4a0001, quorum=127.0.0.1:64033, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T21:45:30,163 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33181-0x10030bfbd4a0001, quorum=127.0.0.1:64033, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T21:45:30,164 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33181 2024-11-11T21:45:30,164 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33181 2024-11-11T21:45:30,164 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33181 2024-11-11T21:45:30,165 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33181 2024-11-11T21:45:30,165 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33181 2024-11-11T21:45:30,177 
DEBUG [M:0;22a1f13b90b0:42845 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;22a1f13b90b0:42845 2024-11-11T21:45:30,178 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/22a1f13b90b0,42845,1731361530096 2024-11-11T21:45:30,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:45:30,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33181-0x10030bfbd4a0001, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:45:30,180 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/22a1f13b90b0,42845,1731361530096 2024-11-11T21:45:30,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33181-0x10030bfbd4a0001, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T21:45:30,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:30,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33181-0x10030bfbd4a0001, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:30,181 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T21:45:30,182 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/22a1f13b90b0,42845,1731361530096 from backup master directory 2024-11-11T21:45:30,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/22a1f13b90b0,42845,1731361530096 2024-11-11T21:45:30,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33181-0x10030bfbd4a0001, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:45:30,182 WARN [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
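The ZKWatcher/ZKUtil entries above track znodes under /hbase (master, backup-masters, running, acl) on the MiniZooKeeperCluster at client port 64033. Purely as an illustrative sketch, the stock ZooKeeper client can take a read-only peek at those same znodes; the port below is specific to this particular run and the class name is hypothetical.

```java
import java.util.List;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

/** Read-only look at the znodes the watcher events above refer to. */
public class HBaseZnodeInspector {
  public static void main(String[] args) throws Exception {
    // 64033 is the MiniZooKeeperCluster client port reported earlier in this log.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:64033", 30000, event -> { /* ignore events */ });
    try {
      List<String> children = zk.getChildren("/hbase", false);
      System.out.println("/hbase children: " + children);
      for (String node : new String[] {"/hbase/master", "/hbase/backup-masters", "/hbase/running"}) {
        Stat stat = zk.exists(node, false);
        System.out.println(node + (stat == null ? " does not exist yet" : " exists, version=" + stat.getVersion()));
      }
    } finally {
      zk.close();
    }
  }
}
```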
2024-11-11T21:45:30,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:45:30,182 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=22a1f13b90b0,42845,1731361530096 2024-11-11T21:45:30,187 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/hbase.id] with ID: 32fbf301-a1d4-49cf-81ad-27e137538ed1 2024-11-11T21:45:30,187 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/.tmp/hbase.id 2024-11-11T21:45:30,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39385 is added to blk_1073741826_1002 (size=42) 2024-11-11T21:45:30,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741826_1002 (size=42) 2024-11-11T21:45:30,195 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/.tmp/hbase.id]:[hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/hbase.id] 2024-11-11T21:45:30,210 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:45:30,210 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T21:45:30,212 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
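The FSUtils entries above describe writing the cluster ID (32fbf301-a1d4-49cf-81ad-27e137538ed1) to a temporary .tmp/hbase.id file and then moving it to its final hbase.id location. Below is a sketch of that write-then-rename pattern using the plain Hadoop FileSystem API; the paths are shortened and hypothetical, and this is not the utility's actual implementation.

```java
import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Write-to-temp-then-rename, the pattern the FSUtils entries above describe. */
public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical namenode URI and paths; the real run wrote under
    // hdfs://localhost:39361/user/jenkins/test-data/... .
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:39361"), conf);
    Path tmp = new Path("/user/jenkins/test-data/example/.tmp/hbase.id");
    Path target = new Path("/user/jenkins/test-data/example/hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("32fbf301-a1d4-49cf-81ad-27e137538ed1".getBytes(StandardCharsets.UTF_8));
    }
    // Move the temporary file into its final location in one step.
    if (!fs.rename(tmp, target)) {
      throw new java.io.IOException("rename " + tmp + " -> " + target + " failed");
    }
  }
}
```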
2024-11-11T21:45:30,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:30,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33181-0x10030bfbd4a0001, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:30,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39385 is added to blk_1073741827_1003 (size=196) 2024-11-11T21:45:30,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741827_1003 (size=196) 2024-11-11T21:45:30,254 INFO [regionserver/22a1f13b90b0:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T21:45:30,628 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T21:45:30,630 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T21:45:30,631 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:45:30,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741828_1004 (size=1189) 2024-11-11T21:45:30,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39385 is added to blk_1073741828_1004 (size=1189) 2024-11-11T21:45:30,647 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' 
=> 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store 2024-11-11T21:45:30,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39385 is added to blk_1073741829_1005 (size=34) 2024-11-11T21:45:30,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741829_1005 (size=34) 2024-11-11T21:45:30,654 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:45:30,654 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T21:45:30,654 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:45:30,654 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:45:30,654 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T21:45:30,654 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:45:30,654 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
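The MasterRegion entries above create the local 'master:store' region from a descriptor whose 'info' family uses VERSIONS=3, ROW_INDEX_V1 encoding, a ROWCOL bloom filter, IN_MEMORY=true and an 8 KB block size. The sketch below builds a similarly shaped descriptor with the public client API; the 'example:store' table name is hypothetical and only two of the four families are shown.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

/** Builds a descriptor shaped like the 'info' and 'proc' families logged above. */
public class StoreDescriptorSketch {
  public static TableDescriptor build() {
    // "example:store" is a made-up namespace:qualifier used only for illustration.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8 KB
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)                                    // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)                    // BLOOMFILTER => 'ROW'
            .build())
        .build();
  }

  public static void main(String[] args) {
    System.out.println(build());
  }
}
```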
2024-11-11T21:45:30,654 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731361530654Disabling compacts and flushes for region at 1731361530654Disabling writes for close at 1731361530654Writing region close event to WAL at 1731361530654Closed at 1731361530654 2024-11-11T21:45:30,655 WARN [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/.initializing 2024-11-11T21:45:30,655 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096 2024-11-11T21:45:30,659 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C42845%2C1731361530096, suffix=, logDir=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096, archiveDir=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/oldWALs, maxLogs=10 2024-11-11T21:45:30,659 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C42845%2C1731361530096.1731361530659 2024-11-11T21:45:30,665 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 2024-11-11T21:45:30,666 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35507:35507),(127.0.0.1/127.0.0.1:45383:45383)] 2024-11-11T21:45:30,667 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T21:45:30,667 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:45:30,668 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:30,668 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:30,670 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:30,672 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T21:45:30,672 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:30,672 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:45:30,672 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:30,674 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T21:45:30,674 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:30,675 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:45:30,675 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:30,677 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T21:45:30,677 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:30,678 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:45:30,678 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:30,680 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T21:45:30,680 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:30,681 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:45:30,681 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:30,682 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:30,682 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:30,685 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:30,685 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:30,686 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T21:45:30,687 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:45:30,690 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T21:45:30,690 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=741028, jitterRate=-0.057735368609428406}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T21:45:30,691 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731361530668Initializing all the Stores at 1731361530669 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361530669Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361530669Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361530669Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361530669Cleaning up temporary data from old regions at 1731361530685 (+16 ms)Region opened successfully at 1731361530691 (+6 ms) 2024-11-11T21:45:30,691 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T21:45:30,695 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fc479f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=22a1f13b90b0/172.17.0.2:0 2024-11-11T21:45:30,696 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T21:45:30,696 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T21:45:30,696 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T21:45:30,696 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T21:45:30,697 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T21:45:30,697 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T21:45:30,697 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T21:45:30,700 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T21:45:30,701 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T21:45:30,701 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T21:45:30,702 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T21:45:30,702 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T21:45:30,703 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T21:45:30,703 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T21:45:30,704 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T21:45:30,705 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T21:45:30,706 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T21:45:30,706 DEBUG 
[master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T21:45:30,708 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T21:45:30,709 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T21:45:30,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33181-0x10030bfbd4a0001, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T21:45:30,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T21:45:30,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33181-0x10030bfbd4a0001, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:30,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:30,710 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=22a1f13b90b0,42845,1731361530096, sessionid=0x10030bfbd4a0000, setting cluster-up flag (Was=false) 2024-11-11T21:45:30,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:30,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33181-0x10030bfbd4a0001, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:30,715 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T21:45:30,716 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=22a1f13b90b0,42845,1731361530096 2024-11-11T21:45:30,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:30,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33181-0x10030bfbd4a0001, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:30,722 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T21:45:30,723 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=22a1f13b90b0,42845,1731361530096 2024-11-11T21:45:30,724 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T21:45:30,726 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T21:45:30,726 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T21:45:30,726 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T21:45:30,727 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 22a1f13b90b0,42845,1731361530096 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T21:45:30,728 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:45:30,728 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:45:30,728 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:45:30,728 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:45:30,728 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/22a1f13b90b0:0, corePoolSize=10, maxPoolSize=10 2024-11-11T21:45:30,728 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:30,728 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=2, maxPoolSize=2 2024-11-11T21:45:30,728 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T21:45:30,730 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:45:30,730 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T21:45:30,731 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731361560731 2024-11-11T21:45:30,731 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T21:45:30,731 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T21:45:30,731 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T21:45:30,731 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T21:45:30,731 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T21:45:30,731 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T21:45:30,731 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
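The ZKUtil entries above repeatedly report "Unable to get data of znode ... because node does not exist (not necessarily an error)" for /hbase/balancer, /hbase/normalizer, /hbase/switch/split, /hbase/switch/merge and /hbase/snapshot-cleanup: the master probes optional switch znodes and treats a missing node as "no stored override". A minimal sketch of that read-optional-znode pattern with the plain ZooKeeper client follows; the quorum string 127.0.0.1:64033 is copied from the log, while the class name, timeout and fallback message are invented for illustration.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

// Illustrative sketch only, not code from this test run. It mimics the pattern in the
// log: read an optional flag znode and fall back to a default when it is absent.
public final class OptionalZNodeRead {

    // Returns the znode data, or null when the node does not exist.
    static byte[] readOrNull(ZooKeeper zk, String path) throws Exception {
        try {
            return zk.getData(path, false, new Stat());
        } catch (KeeperException.NoNodeException e) {
            return null; // a missing node is expected here, not an error
        }
    }

    public static void main(String[] args) throws Exception {
        // Quorum as reported in the log; the 5 second session timeout is arbitrary.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64033", 5000, event -> { });
        try {
            byte[] data = readOrNull(zk, "/hbase/balancer");
            System.out.println(data == null
                ? "/hbase/balancer missing, falling back to the configured default"
                : "/hbase/balancer present, " + data.length + " bytes");
        } finally {
            zk.close();
        }
    }
}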
2024-11-11T21:45:30,731 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:30,732 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T21:45:30,732 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T21:45:30,732 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T21:45:30,732 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T21:45:30,732 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T21:45:30,732 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T21:45:30,732 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361530732,5,FailOnTimeoutGroup] 2024-11-11T21:45:30,733 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361530732,5,FailOnTimeoutGroup] 2024-11-11T21:45:30,733 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:30,733 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T21:45:30,733 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:30,733 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:30,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39385 is added to blk_1073741831_1007 (size=1321) 2024-11-11T21:45:30,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741831_1007 (size=1321) 2024-11-11T21:45:30,743 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T21:45:30,743 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4 2024-11-11T21:45:30,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39385 is added to blk_1073741832_1008 (size=32) 2024-11-11T21:45:30,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741832_1008 (size=32) 2024-11-11T21:45:30,753 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:45:30,755 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T21:45:30,756 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T21:45:30,756 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:30,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:45:30,757 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T21:45:30,758 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T21:45:30,758 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:30,758 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:45:30,758 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T21:45:30,760 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T21:45:30,760 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:30,760 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:45:30,760 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T21:45:30,762 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T21:45:30,762 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:30,762 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:45:30,763 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T21:45:30,763 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740 2024-11-11T21:45:30,764 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740 2024-11-11T21:45:30,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T21:45:30,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T21:45:30,766 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T21:45:30,767 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.HRegionServer(746): ClusterId : 32fbf301-a1d4-49cf-81ad-27e137538ed1 2024-11-11T21:45:30,767 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T21:45:30,767 DEBUG [RS:0;22a1f13b90b0:33181 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T21:45:30,769 DEBUG [RS:0;22a1f13b90b0:33181 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T21:45:30,769 DEBUG [RS:0;22a1f13b90b0:33181 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T21:45:30,770 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T21:45:30,770 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=802389, jitterRate=0.020290449261665344}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T21:45:30,771 DEBUG [RS:0;22a1f13b90b0:33181 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T21:45:30,771 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731361530754Initializing all the Stores at 1731361530754Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361530754Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361530755 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361530755Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361530755Cleaning up temporary data from old regions at 1731361530765 (+10 ms)Region opened successfully at 1731361530771 (+6 ms) 2024-11-11T21:45:30,771 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T21:45:30,771 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region 
hbase:meta,,1.1588230740 2024-11-11T21:45:30,771 DEBUG [RS:0;22a1f13b90b0:33181 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fc2a37c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=22a1f13b90b0/172.17.0.2:0 2024-11-11T21:45:30,771 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T21:45:30,772 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T21:45:30,772 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T21:45:30,772 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T21:45:30,772 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731361530771Disabling compacts and flushes for region at 1731361530771Disabling writes for close at 1731361530772 (+1 ms)Writing region close event to WAL at 1731361530772Closed at 1731361530772 2024-11-11T21:45:30,774 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:45:30,774 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T21:45:30,774 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T21:45:30,775 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T21:45:30,777 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T21:45:30,786 DEBUG [RS:0;22a1f13b90b0:33181 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;22a1f13b90b0:33181 2024-11-11T21:45:30,787 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T21:45:30,787 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T21:45:30,787 DEBUG [RS:0;22a1f13b90b0:33181 {}] regionserver.HRegionServer(832): About to register with Master. 
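The FlushLargeStoresPolicy entries above state that, with hbase.hregion.percolumnfamilyflush.size.lower.bound unset, the lower bound falls back to region.getMemStoreFlushHeapSize divided by the number of column families, which is where the logged 32.0 M (master:store, flushSize=134217728, four families) and 16.0 M (hbase:meta, four families) come from. A small sketch of that arithmetic follows; the class and method names are invented for illustration, only the numbers come from the log, and the 64 MB flush heap size for hbase:meta is implied by the 16.0 M figure rather than stated.

// Illustrative arithmetic only; the helper is hypothetical, the inputs and expected
// outputs mirror the FlushLargeStoresPolicy{flushSizeLowerBound=...} entries above.
public final class FlushLowerBoundFallback {

    // Fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set
    // on the table descriptor: memstore flush heap size / number of column families.
    static long fallbackLowerBound(long memStoreFlushHeapSize, int numFamilies) {
        return memStoreFlushHeapSize / numFamilies;
    }

    public static void main(String[] args) {
        // master:store: 4 families (info, proc, rs, state), 128 MB flush size -> 32 MB.
        System.out.println(fallbackLowerBound(128L * 1024 * 1024, 4)); // 33554432

        // hbase:meta: 4 families (info, ns, rep_barrier, table), implied 64 MB -> 16 MB.
        System.out.println(fallbackLowerBound(64L * 1024 * 1024, 4));  // 16777216
    }
}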
2024-11-11T21:45:30,788 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.HRegionServer(2659): reportForDuty to master=22a1f13b90b0,42845,1731361530096 with port=33181, startcode=1731361530149 2024-11-11T21:45:30,788 DEBUG [RS:0;22a1f13b90b0:33181 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T21:45:30,790 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46767, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T21:45:30,790 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42845 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 22a1f13b90b0,33181,1731361530149 2024-11-11T21:45:30,790 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42845 {}] master.ServerManager(517): Registering regionserver=22a1f13b90b0,33181,1731361530149 2024-11-11T21:45:30,792 DEBUG [RS:0;22a1f13b90b0:33181 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4 2024-11-11T21:45:30,792 DEBUG [RS:0;22a1f13b90b0:33181 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39361 2024-11-11T21:45:30,792 DEBUG [RS:0;22a1f13b90b0:33181 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T21:45:30,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T21:45:30,794 DEBUG [RS:0;22a1f13b90b0:33181 {}] zookeeper.ZKUtil(111): regionserver:33181-0x10030bfbd4a0001, quorum=127.0.0.1:64033, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/22a1f13b90b0,33181,1731361530149 2024-11-11T21:45:30,794 WARN [RS:0;22a1f13b90b0:33181 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T21:45:30,794 INFO [RS:0;22a1f13b90b0:33181 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:45:30,794 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [22a1f13b90b0,33181,1731361530149] 2024-11-11T21:45:30,794 DEBUG [RS:0;22a1f13b90b0:33181 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149 2024-11-11T21:45:30,798 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T21:45:30,801 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T21:45:30,801 INFO [RS:0;22a1f13b90b0:33181 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T21:45:30,802 INFO [RS:0;22a1f13b90b0:33181 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
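The "Opened 1588230740" entry above ends with ConstantSizeRegionSplitPolicy{desiredMaxFileSize=802389, jitterRate=0.0202904...}; together with the 741028 / -0.0577 pair for the master local store and the 721988 / -0.0819 pair later in this log, the three values are consistent with a base split size of roughly 786432 bytes scaled by (1 + jitterRate). That base is an inference from the numbers, not something the log prints; the sketch below only reproduces the arithmetic, to within a byte of rounding.

// Illustrative arithmetic only. BASE_SPLIT_SIZE is inferred by dividing the logged
// desiredMaxFileSize values by (1 + jitterRate); it does not appear in the log text.
public final class SplitSizeJitter {
    static final long BASE_SPLIT_SIZE = 786_432L; // inferred, see note above

    static long desiredMaxFileSize(double jitterRate) {
        return Math.round(BASE_SPLIT_SIZE * (1.0 + jitterRate));
    }

    public static void main(String[] args) {
        System.out.println(desiredMaxFileSize(0.020290449261665344));  // ~802389 (hbase:meta, PEWorker open)
        System.out.println(desiredMaxFileSize(-0.057735368609428406)); // ~741028 (master local store)
        System.out.println(desiredMaxFileSize(-0.08194506168365479));  // ~721988 (hbase:meta, regionserver open)
    }
}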
2024-11-11T21:45:30,802 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T21:45:30,803 INFO [RS:0;22a1f13b90b0:33181 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T21:45:30,803 INFO [RS:0;22a1f13b90b0:33181 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:30,803 DEBUG [RS:0;22a1f13b90b0:33181 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:30,803 DEBUG [RS:0;22a1f13b90b0:33181 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:30,803 DEBUG [RS:0;22a1f13b90b0:33181 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:30,804 DEBUG [RS:0;22a1f13b90b0:33181 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:30,804 DEBUG [RS:0;22a1f13b90b0:33181 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:30,804 DEBUG [RS:0;22a1f13b90b0:33181 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/22a1f13b90b0:0, corePoolSize=2, maxPoolSize=2 2024-11-11T21:45:30,804 DEBUG [RS:0;22a1f13b90b0:33181 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:30,804 DEBUG [RS:0;22a1f13b90b0:33181 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:30,804 DEBUG [RS:0;22a1f13b90b0:33181 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:30,804 DEBUG [RS:0;22a1f13b90b0:33181 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:30,804 DEBUG [RS:0;22a1f13b90b0:33181 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:30,804 DEBUG [RS:0;22a1f13b90b0:33181 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:30,805 DEBUG [RS:0;22a1f13b90b0:33181 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/22a1f13b90b0:0, corePoolSize=3, maxPoolSize=3 2024-11-11T21:45:30,805 DEBUG [RS:0;22a1f13b90b0:33181 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0, corePoolSize=3, maxPoolSize=3 2024-11-11T21:45:30,805 INFO [RS:0;22a1f13b90b0:33181 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-11T21:45:30,805 INFO [RS:0;22a1f13b90b0:33181 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:30,805 INFO [RS:0;22a1f13b90b0:33181 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:30,805 INFO [RS:0;22a1f13b90b0:33181 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:30,805 INFO [RS:0;22a1f13b90b0:33181 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:30,805 INFO [RS:0;22a1f13b90b0:33181 {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,33181,1731361530149-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T21:45:30,827 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T21:45:30,828 INFO [RS:0;22a1f13b90b0:33181 {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,33181,1731361530149-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:30,828 INFO [RS:0;22a1f13b90b0:33181 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:30,828 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.Replication(171): 22a1f13b90b0,33181,1731361530149 started 2024-11-11T21:45:30,843 INFO [RS:0;22a1f13b90b0:33181 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:30,843 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.HRegionServer(1482): Serving as 22a1f13b90b0,33181,1731361530149, RpcServer on 22a1f13b90b0/172.17.0.2:33181, sessionid=0x10030bfbd4a0001 2024-11-11T21:45:30,843 DEBUG [RS:0;22a1f13b90b0:33181 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T21:45:30,843 DEBUG [RS:0;22a1f13b90b0:33181 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 22a1f13b90b0,33181,1731361530149 2024-11-11T21:45:30,843 DEBUG [RS:0;22a1f13b90b0:33181 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '22a1f13b90b0,33181,1731361530149' 2024-11-11T21:45:30,843 DEBUG [RS:0;22a1f13b90b0:33181 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T21:45:30,844 DEBUG [RS:0;22a1f13b90b0:33181 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T21:45:30,844 DEBUG [RS:0;22a1f13b90b0:33181 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T21:45:30,844 DEBUG [RS:0;22a1f13b90b0:33181 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T21:45:30,844 DEBUG [RS:0;22a1f13b90b0:33181 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 22a1f13b90b0,33181,1731361530149 2024-11-11T21:45:30,844 DEBUG [RS:0;22a1f13b90b0:33181 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '22a1f13b90b0,33181,1731361530149' 2024-11-11T21:45:30,845 DEBUG [RS:0;22a1f13b90b0:33181 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T21:45:30,845 DEBUG 
[RS:0;22a1f13b90b0:33181 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T21:45:30,845 DEBUG [RS:0;22a1f13b90b0:33181 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T21:45:30,846 INFO [RS:0;22a1f13b90b0:33181 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T21:45:30,846 INFO [RS:0;22a1f13b90b0:33181 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T21:45:30,927 WARN [22a1f13b90b0:42845 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-11T21:45:30,952 INFO [RS:0;22a1f13b90b0:33181 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C33181%2C1731361530149, suffix=, logDir=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149, archiveDir=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/oldWALs, maxLogs=32 2024-11-11T21:45:30,954 INFO [RS:0;22a1f13b90b0:33181 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C33181%2C1731361530149.1731361530953 2024-11-11T21:45:30,986 INFO [RS:0;22a1f13b90b0:33181 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361530953 2024-11-11T21:45:30,987 DEBUG [RS:0;22a1f13b90b0:33181 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35507:35507),(127.0.0.1/127.0.0.1:45383:45383)] 2024-11-11T21:45:31,177 DEBUG [22a1f13b90b0:42845 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T21:45:31,178 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=22a1f13b90b0,33181,1731361530149 2024-11-11T21:45:31,180 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 22a1f13b90b0,33181,1731361530149, state=OPENING 2024-11-11T21:45:31,181 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T21:45:31,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:31,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33181-0x10030bfbd4a0001, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:45:31,182 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T21:45:31,183 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:45:31,183 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:45:31,183 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=22a1f13b90b0,33181,1731361530149}] 2024-11-11T21:45:31,336 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T21:45:31,340 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35045, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T21:45:31,345 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T21:45:31,345 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:45:31,348 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C33181%2C1731361530149.meta, suffix=.meta, logDir=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149, archiveDir=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/oldWALs, maxLogs=32 2024-11-11T21:45:31,349 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta 2024-11-11T21:45:31,358 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta 2024-11-11T21:45:31,367 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45383:45383),(127.0.0.1/127.0.0.1:35507:35507)] 2024-11-11T21:45:31,368 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T21:45:31,368 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T21:45:31,368 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T21:45:31,368 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
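The hbase:meta table descriptor written earlier in this log attaches the MultiRowMutationEndpoint coprocessor and defines the 'info' family with VERSIONS 3, a ROWCOL bloom filter, ROW_INDEX_V1 encoding, IN_MEMORY true and an 8 KB block size; the coprocessor is then loaded here as the region opens. A hedged sketch of how a comparable descriptor could be expressed through the public client API follows; the table name "example" is hypothetical, and this is not how the test or the master actually creates hbase:meta.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch: a user-level table descriptor mirroring the 'info' family and
// coprocessor attributes shown in the logged hbase:meta descriptor.
public final class DescriptorSketch {
    public static void main(String[] args) throws Exception {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
            .build();

        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example")) // hypothetical table name
            .setColumnFamily(info)
            // Same endpoint class the log shows attached to hbase:meta.
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();

        System.out.println(table);
    }
}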
2024-11-11T21:45:31,369 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T21:45:31,369 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:45:31,369 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T21:45:31,369 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T21:45:31,373 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T21:45:31,374 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T21:45:31,374 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:31,375 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:45:31,375 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T21:45:31,376 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T21:45:31,376 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:31,377 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:45:31,377 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T21:45:31,378 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T21:45:31,378 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:31,379 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:45:31,379 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T21:45:31,381 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T21:45:31,381 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:31,381 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-11T21:45:31,382 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T21:45:31,383 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740 2024-11-11T21:45:31,385 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740 2024-11-11T21:45:31,387 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T21:45:31,387 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T21:45:31,388 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T21:45:31,389 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T21:45:31,390 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=721988, jitterRate=-0.08194506168365479}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T21:45:31,390 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T21:45:31,391 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731361531369Writing region info on filesystem at 1731361531369Initializing all the Stores at 1731361531372 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361531372Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361531373 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361531373Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361531373Cleaning up temporary data from old regions at 1731361531387 (+14 ms)Running coprocessor post-open hooks at 1731361531390 (+3 ms)Region opened successfully at 1731361531391 (+1 ms) 2024-11-11T21:45:31,392 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731361531336 2024-11-11T21:45:31,396 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T21:45:31,396 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=22a1f13b90b0,33181,1731361530149 2024-11-11T21:45:31,396 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T21:45:31,398 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 22a1f13b90b0,33181,1731361530149, state=OPEN 2024-11-11T21:45:31,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33181-0x10030bfbd4a0001, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T21:45:31,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T21:45:31,401 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=22a1f13b90b0,33181,1731361530149 2024-11-11T21:45:31,401 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:45:31,401 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:45:31,405 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T21:45:31,405 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=22a1f13b90b0,33181,1731361530149 in 219 msec 2024-11-11T21:45:31,410 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T21:45:31,411 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 631 msec 2024-11-11T21:45:31,412 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:45:31,412 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T21:45:31,414 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T21:45:31,414 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=22a1f13b90b0,33181,1731361530149, seqNum=-1] 2024-11-11T21:45:31,414 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T21:45:31,416 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54173, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T21:45:31,424 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 696 msec 2024-11-11T21:45:31,424 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731361531424, completionTime=-1 2024-11-11T21:45:31,424 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T21:45:31,425 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-11T21:45:31,427 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T21:45:31,427 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731361591427 2024-11-11T21:45:31,427 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731361651427 2024-11-11T21:45:31,427 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-11T21:45:31,427 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,42845,1731361530096-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:31,427 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,42845,1731361530096-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:31,427 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,42845,1731361530096-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:31,427 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-22a1f13b90b0:42845, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T21:45:31,428 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:31,428 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:31,430 DEBUG [master/22a1f13b90b0:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T21:45:31,433 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.250sec 2024-11-11T21:45:31,434 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T21:45:31,434 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T21:45:31,434 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T21:45:31,434 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T21:45:31,434 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T21:45:31,434 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,42845,1731361530096-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T21:45:31,434 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,42845,1731361530096-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T21:45:31,436 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T21:45:31,436 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T21:45:31,436 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,42845,1731361530096-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
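[Annotation, not part of the captured log] The master reports completed initialization above, and immediately below the Time-limited test thread opens a client connection: cluster id fetch via the connection registry, then the hbase:meta location lookup. A minimal client-side sketch of that step is shown here, assuming only the standard Connection/Admin API; the quorum value is the one reported elsewhere in this log (in the test it comes from the mini cluster's configuration), and the class name is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ConnectSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ZooKeeper quorum as reported in the log; normally supplied by the test utility.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:64033");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // The cluster id fetch and meta region location lookup logged below happen
      // lazily inside the connection implementation on first use.
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}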
2024-11-11T21:45:31,464 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-11T21:45:31,485 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ecb04fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:45:31,485 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 22a1f13b90b0,42845,-1 for getting cluster id 2024-11-11T21:45:31,485 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T21:45:31,487 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '32fbf301-a1d4-49cf-81ad-27e137538ed1' 2024-11-11T21:45:31,488 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T21:45:31,488 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "32fbf301-a1d4-49cf-81ad-27e137538ed1" 2024-11-11T21:45:31,489 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78d13ea1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:45:31,489 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [22a1f13b90b0,42845,-1] 2024-11-11T21:45:31,489 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T21:45:31,490 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:45:31,491 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53320, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T21:45:31,492 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45b2e9f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:45:31,493 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T21:45:31,494 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=22a1f13b90b0,33181,1731361530149, seqNum=-1] 2024-11-11T21:45:31,494 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T21:45:31,496 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57614, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T21:45:31,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster 
is up; activeMaster=22a1f13b90b0,42845,1731361530096 2024-11-11T21:45:31,503 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:45:31,506 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T21:45:31,522 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/22a1f13b90b0:0 server-side Connection retries=45 2024-11-11T21:45:31,522 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:45:31,522 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T21:45:31,522 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T21:45:31,522 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:45:31,522 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T21:45:31,522 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T21:45:31,522 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T21:45:31,523 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37551 2024-11-11T21:45:31,525 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37551 connecting to ZooKeeper ensemble=127.0.0.1:64033 2024-11-11T21:45:31,526 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:45:31,528 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:45:31,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:375510x0, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T21:45:31,532 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-11T21:45:31,532 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:375510x0, quorum=127.0.0.1:64033, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-11T21:45:31,532 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37551-0x10030bfbd4a0002 connected 2024-11-11T21:45:31,533 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache 
size=880 MB, blockSize=64 KB 2024-11-11T21:45:31,534 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T21:45:31,535 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:37551-0x10030bfbd4a0002, quorum=127.0.0.1:64033, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T21:45:31,536 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37551-0x10030bfbd4a0002, quorum=127.0.0.1:64033, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T21:45:31,540 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37551 2024-11-11T21:45:31,540 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37551 2024-11-11T21:45:31,540 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37551 2024-11-11T21:45:31,541 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37551 2024-11-11T21:45:31,542 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37551 2024-11-11T21:45:31,543 INFO [RS:1;22a1f13b90b0:37551 {}] regionserver.HRegionServer(746): ClusterId : 32fbf301-a1d4-49cf-81ad-27e137538ed1 2024-11-11T21:45:31,543 DEBUG [RS:1;22a1f13b90b0:37551 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T21:45:31,544 DEBUG [RS:1;22a1f13b90b0:37551 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T21:45:31,544 DEBUG [RS:1;22a1f13b90b0:37551 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T21:45:31,546 DEBUG [RS:1;22a1f13b90b0:37551 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T21:45:31,547 DEBUG [RS:1;22a1f13b90b0:37551 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f99818f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=22a1f13b90b0/172.17.0.2:0 2024-11-11T21:45:31,559 DEBUG [RS:1;22a1f13b90b0:37551 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;22a1f13b90b0:37551 2024-11-11T21:45:31,559 INFO [RS:1;22a1f13b90b0:37551 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T21:45:31,559 INFO [RS:1;22a1f13b90b0:37551 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T21:45:31,559 DEBUG [RS:1;22a1f13b90b0:37551 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-11T21:45:31,560 INFO [RS:1;22a1f13b90b0:37551 {}] regionserver.HRegionServer(2659): reportForDuty to master=22a1f13b90b0,42845,1731361530096 with port=37551, startcode=1731361531521 2024-11-11T21:45:31,560 DEBUG [RS:1;22a1f13b90b0:37551 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T21:45:31,562 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44259, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T21:45:31,562 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42845 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 22a1f13b90b0,37551,1731361531521 2024-11-11T21:45:31,562 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42845 {}] master.ServerManager(517): Registering regionserver=22a1f13b90b0,37551,1731361531521 2024-11-11T21:45:31,564 DEBUG [RS:1;22a1f13b90b0:37551 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4 2024-11-11T21:45:31,564 DEBUG [RS:1;22a1f13b90b0:37551 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39361 2024-11-11T21:45:31,564 DEBUG [RS:1;22a1f13b90b0:37551 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T21:45:31,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T21:45:31,566 DEBUG [RS:1;22a1f13b90b0:37551 {}] zookeeper.ZKUtil(111): regionserver:37551-0x10030bfbd4a0002, quorum=127.0.0.1:64033, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/22a1f13b90b0,37551,1731361531521 2024-11-11T21:45:31,566 WARN [RS:1;22a1f13b90b0:37551 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T21:45:31,566 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [22a1f13b90b0,37551,1731361531521] 2024-11-11T21:45:31,566 INFO [RS:1;22a1f13b90b0:37551 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:45:31,566 DEBUG [RS:1;22a1f13b90b0:37551 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521 2024-11-11T21:45:31,572 INFO [RS:1;22a1f13b90b0:37551 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T21:45:31,575 INFO [RS:1;22a1f13b90b0:37551 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T21:45:31,577 INFO [RS:1;22a1f13b90b0:37551 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T21:45:31,577 INFO [RS:1;22a1f13b90b0:37551 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-11T21:45:31,577 INFO [RS:1;22a1f13b90b0:37551 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T21:45:31,578 INFO [RS:1;22a1f13b90b0:37551 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T21:45:31,578 INFO [RS:1;22a1f13b90b0:37551 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:31,579 DEBUG [RS:1;22a1f13b90b0:37551 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:31,579 DEBUG [RS:1;22a1f13b90b0:37551 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:31,579 DEBUG [RS:1;22a1f13b90b0:37551 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:31,579 DEBUG [RS:1;22a1f13b90b0:37551 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:31,579 DEBUG [RS:1;22a1f13b90b0:37551 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:31,579 DEBUG [RS:1;22a1f13b90b0:37551 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/22a1f13b90b0:0, corePoolSize=2, maxPoolSize=2 2024-11-11T21:45:31,579 DEBUG [RS:1;22a1f13b90b0:37551 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:31,579 DEBUG [RS:1;22a1f13b90b0:37551 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:31,579 DEBUG [RS:1;22a1f13b90b0:37551 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:31,580 DEBUG [RS:1;22a1f13b90b0:37551 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:31,580 DEBUG [RS:1;22a1f13b90b0:37551 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:31,580 DEBUG [RS:1;22a1f13b90b0:37551 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:45:31,580 DEBUG [RS:1;22a1f13b90b0:37551 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/22a1f13b90b0:0, corePoolSize=3, maxPoolSize=3 2024-11-11T21:45:31,580 DEBUG [RS:1;22a1f13b90b0:37551 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0, corePoolSize=3, maxPoolSize=3 2024-11-11T21:45:31,581 INFO [RS:1;22a1f13b90b0:37551 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-11T21:45:31,581 INFO [RS:1;22a1f13b90b0:37551 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:31,581 INFO [RS:1;22a1f13b90b0:37551 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:31,581 INFO [RS:1;22a1f13b90b0:37551 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:31,582 INFO [RS:1;22a1f13b90b0:37551 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:31,582 INFO [RS:1;22a1f13b90b0:37551 {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,37551,1731361531521-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T21:45:31,603 INFO [RS:1;22a1f13b90b0:37551 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T21:45:31,603 INFO [RS:1;22a1f13b90b0:37551 {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,37551,1731361531521-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:31,603 INFO [RS:1;22a1f13b90b0:37551 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:31,603 INFO [RS:1;22a1f13b90b0:37551 {}] regionserver.Replication(171): 22a1f13b90b0,37551,1731361531521 started 2024-11-11T21:45:31,618 INFO [RS:1;22a1f13b90b0:37551 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:45:31,618 INFO [RS:1;22a1f13b90b0:37551 {}] regionserver.HRegionServer(1482): Serving as 22a1f13b90b0,37551,1731361531521, RpcServer on 22a1f13b90b0/172.17.0.2:37551, sessionid=0x10030bfbd4a0002 2024-11-11T21:45:31,619 DEBUG [RS:1;22a1f13b90b0:37551 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T21:45:31,619 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;22a1f13b90b0:37551,5,FailOnTimeoutGroup] 2024-11-11T21:45:31,619 DEBUG [RS:1;22a1f13b90b0:37551 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 22a1f13b90b0,37551,1731361531521 2024-11-11T21:45:31,619 DEBUG [RS:1;22a1f13b90b0:37551 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '22a1f13b90b0,37551,1731361531521' 2024-11-11T21:45:31,619 DEBUG [RS:1;22a1f13b90b0:37551 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T21:45:31,619 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-11T21:45:31,619 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T21:45:31,619 DEBUG [RS:1;22a1f13b90b0:37551 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T21:45:31,620 DEBUG [RS:1;22a1f13b90b0:37551 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T21:45:31,620 DEBUG [RS:1;22a1f13b90b0:37551 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T21:45:31,620 DEBUG [RS:1;22a1f13b90b0:37551 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
22a1f13b90b0,37551,1731361531521 2024-11-11T21:45:31,620 DEBUG [RS:1;22a1f13b90b0:37551 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '22a1f13b90b0,37551,1731361531521' 2024-11-11T21:45:31,620 DEBUG [RS:1;22a1f13b90b0:37551 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T21:45:31,620 DEBUG [RS:1;22a1f13b90b0:37551 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T21:45:31,621 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 22a1f13b90b0,42845,1731361530096 2024-11-11T21:45:31,621 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@397196d9 2024-11-11T21:45:31,621 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T21:45:31,621 DEBUG [RS:1;22a1f13b90b0:37551 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T21:45:31,622 INFO [RS:1;22a1f13b90b0:37551 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T21:45:31,622 INFO [RS:1;22a1f13b90b0:37551 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T21:45:31,623 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53326, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T21:45:31,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42845 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-11T21:45:31,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42845 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
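[Annotation, not part of the captured log] The two TableDescriptorChecker warnings above fire because this run uses a very small region max file size (786432 bytes) and memstore flush size (8192 bytes), presumably to force frequent flushes and log rolls during the test; whether those limits come from the table descriptor or from the site configuration is not visible in this excerpt, and the checker evidently runs in warn-only mode here since the create succeeds below. A hedged sketch of a client-side create that would produce exactly these warnings follows; the table name, 'info' family and the two sizes are taken from the surrounding log lines, while the class name and the way 'admin' is obtained are illustrative and not a copy of the test code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateSmallRegionTableSketch {
  // 'admin' would come from an open Connection as in the earlier sketch.
  static void createTable(Admin admin) throws Exception {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setMaxFileSize(786432)       // triggers the MAX_FILESIZE warning above
        .setMemStoreFlushSize(8192)   // triggers the MEMSTORE_FLUSHSIZE warning above
        .build();
    admin.createTable(desc);          // server side: CreateTableProcedure pid=4 below
  }
}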
2024-11-11T21:45:31,624 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42845 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T21:45:31,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42845 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-11T21:45:31,628 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T21:45:31,629 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:31,629 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42845 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-11T21:45:31,630 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T21:45:31,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42845 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T21:45:31,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39385 is added to blk_1073741835_1011 (size=393) 2024-11-11T21:45:31,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741835_1011 (size=393) 2024-11-11T21:45:31,650 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8a44818e3874d8ebf6a5ff19dff66dd0, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4 2024-11-11T21:45:31,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39385 is added to blk_1073741836_1012 (size=76) 2024-11-11T21:45:31,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741836_1012 (size=76) 2024-11-11T21:45:31,665 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:45:31,665 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 8a44818e3874d8ebf6a5ff19dff66dd0, disabling compactions & flushes 2024-11-11T21:45:31,665 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0. 2024-11-11T21:45:31,665 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0. 2024-11-11T21:45:31,665 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0. after waiting 0 ms 2024-11-11T21:45:31,665 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0. 2024-11-11T21:45:31,665 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0. 2024-11-11T21:45:31,665 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8a44818e3874d8ebf6a5ff19dff66dd0: Waiting for close lock at 1731361531665Disabling compacts and flushes for region at 1731361531665Disabling writes for close at 1731361531665Writing region close event to WAL at 1731361531665Closed at 1731361531665 2024-11-11T21:45:31,667 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T21:45:31,667 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731361531667"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731361531667"}]},"ts":"1731361531667"} 2024-11-11T21:45:31,671 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-11T21:45:31,672 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T21:45:31,673 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731361531672"}]},"ts":"1731361531672"} 2024-11-11T21:45:31,676 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-11T21:45:31,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8a44818e3874d8ebf6a5ff19dff66dd0, ASSIGN}] 2024-11-11T21:45:31,679 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8a44818e3874d8ebf6a5ff19dff66dd0, ASSIGN 2024-11-11T21:45:31,680 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8a44818e3874d8ebf6a5ff19dff66dd0, ASSIGN; state=OFFLINE, location=22a1f13b90b0,33181,1731361530149; forceNewPlan=false, retain=false 2024-11-11T21:45:31,724 INFO [RS:1;22a1f13b90b0:37551 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C37551%2C1731361531521, suffix=, logDir=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521, archiveDir=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/oldWALs, maxLogs=32 2024-11-11T21:45:31,725 INFO [RS:1;22a1f13b90b0:37551 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C37551%2C1731361531521.1731361531725 2024-11-11T21:45:31,732 INFO [RS:1;22a1f13b90b0:37551 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 2024-11-11T21:45:31,737 DEBUG [RS:1;22a1f13b90b0:37551 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45383:45383),(127.0.0.1/127.0.0.1:35507:35507)] 2024-11-11T21:45:31,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:45:31,804 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:45:31,831 INFO [22a1f13b90b0:42845 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
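[Annotation, not part of the captured log] The new region server's WAL is created above with blocksize=256 MB and rollsize=128 MB, which matches the stock roll multiplier of 0.5 (the WAL rolls once it reaches half a block, or on the time-based period, whichever comes first). The configuration sketch below names the standard keys behind those two numbers; the values shown are the ones reported in the log line above, not values read from this test's own configuration files.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // WAL block size; the writer above reports 256 MB.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    // Size-based rolling happens at blocksize * multiplier (0.5 => the 128 MB rollsize above).
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Time-based rolling still applies independently of size (default one hour).
    conf.setLong("hbase.regionserver.logroll.period", 3600_000L);
    long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
        * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
    System.out.println("roll size = " + rollSize + " bytes");
  }
}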
2024-11-11T21:45:31,832 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8a44818e3874d8ebf6a5ff19dff66dd0, regionState=OPENING, regionLocation=22a1f13b90b0,33181,1731361530149 2024-11-11T21:45:31,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8a44818e3874d8ebf6a5ff19dff66dd0, ASSIGN because future has completed 2024-11-11T21:45:31,836 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8a44818e3874d8ebf6a5ff19dff66dd0, server=22a1f13b90b0,33181,1731361530149}] 2024-11-11T21:45:31,998 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0. 2024-11-11T21:45:31,999 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8a44818e3874d8ebf6a5ff19dff66dd0, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0.', STARTKEY => '', ENDKEY => ''} 2024-11-11T21:45:32,000 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 8a44818e3874d8ebf6a5ff19dff66dd0 2024-11-11T21:45:32,000 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:45:32,001 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8a44818e3874d8ebf6a5ff19dff66dd0 2024-11-11T21:45:32,001 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8a44818e3874d8ebf6a5ff19dff66dd0 2024-11-11T21:45:32,003 INFO [StoreOpener-8a44818e3874d8ebf6a5ff19dff66dd0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8a44818e3874d8ebf6a5ff19dff66dd0 2024-11-11T21:45:32,006 INFO [StoreOpener-8a44818e3874d8ebf6a5ff19dff66dd0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8a44818e3874d8ebf6a5ff19dff66dd0 columnFamilyName info 2024-11-11T21:45:32,006 DEBUG [StoreOpener-8a44818e3874d8ebf6a5ff19dff66dd0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:45:32,007 INFO [StoreOpener-8a44818e3874d8ebf6a5ff19dff66dd0-1 {}] regionserver.HStore(327): Store=8a44818e3874d8ebf6a5ff19dff66dd0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:45:32,007 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8a44818e3874d8ebf6a5ff19dff66dd0 2024-11-11T21:45:32,008 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0 2024-11-11T21:45:32,008 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0 2024-11-11T21:45:32,009 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8a44818e3874d8ebf6a5ff19dff66dd0 2024-11-11T21:45:32,009 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8a44818e3874d8ebf6a5ff19dff66dd0 2024-11-11T21:45:32,012 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8a44818e3874d8ebf6a5ff19dff66dd0 2024-11-11T21:45:32,016 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T21:45:32,017 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8a44818e3874d8ebf6a5ff19dff66dd0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=868451, jitterRate=0.1042935699224472}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T21:45:32,018 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8a44818e3874d8ebf6a5ff19dff66dd0 2024-11-11T21:45:32,019 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8a44818e3874d8ebf6a5ff19dff66dd0: Running coprocessor pre-open hook at 1731361532001Writing region info on filesystem at 1731361532001Initializing all the Stores at 1731361532003 (+2 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361532003Cleaning up temporary data from old regions at 1731361532009 (+6 ms)Running coprocessor post-open hooks at 1731361532018 (+9 ms)Region opened successfully at 1731361532019 (+1 ms) 2024-11-11T21:45:32,021 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0., pid=6, masterSystemTime=1731361531990 2024-11-11T21:45:32,026 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0. 2024-11-11T21:45:32,026 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0. 2024-11-11T21:45:32,027 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8a44818e3874d8ebf6a5ff19dff66dd0, regionState=OPEN, openSeqNum=2, regionLocation=22a1f13b90b0,33181,1731361530149 2024-11-11T21:45:32,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8a44818e3874d8ebf6a5ff19dff66dd0, server=22a1f13b90b0,33181,1731361530149 because future has completed 2024-11-11T21:45:32,036 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T21:45:32,036 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8a44818e3874d8ebf6a5ff19dff66dd0, server=22a1f13b90b0,33181,1731361530149 in 197 msec 2024-11-11T21:45:32,039 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T21:45:32,039 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8a44818e3874d8ebf6a5ff19dff66dd0, ASSIGN in 360 msec 2024-11-11T21:45:32,040 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T21:45:32,040 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731361532040"}]},"ts":"1731361532040"} 2024-11-11T21:45:32,042 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-11T21:45:32,044 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T21:45:32,046 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 420 msec 2024-11-11T21:45:32,323 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T21:45:32,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:45:32,357 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:45:32,358 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:45:32,359 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:45:36,799 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T21:45:36,801 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-11T21:45:37,503 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T21:45:37,505 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:45:37,534 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:45:37,540 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:45:37,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:45:41,463 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-11T21:45:41,464 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-11T21:45:41,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42845 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T21:45:41,723 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-11T21:45:41,723 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-11T21:45:41,729 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-11T21:45:41,729 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0. 2024-11-11T21:45:41,747 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:45:41,751 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:45:41,752 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:45:41,752 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:45:41,752 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T21:45:41,753 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21cd4c78{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:45:41,753 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e308c17{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:45:41,854 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@daf04c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/java.io.tmpdir/jetty-localhost-43247-hadoop-hdfs-3_4_1-tests_jar-_-any-1197796781859177506/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 
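[Annotation, not part of the captured log] From here the run appears to be adding extra datanodes to the mini DFS cluster for testLogRollOnDatanodeDeath: the Jetty 'datanode' web contexts above and below, the new data5/data6 block pool directories, and the first block reports from a fresh DatanodeRegistration all belong to that step. The sketch below shows roughly how additional datanodes are started against such a test cluster; the exact helper this test uses is not visible in the excerpt, so treat the HBaseTestingUtil/MiniDFSCluster calls as an assumption based on the standard test APIs rather than a copy of the test.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class AddDataNodesSketch {
  // Adds two datanodes to an already running mini DFS cluster, roughly what a
  // "log roll on datanode death" test needs before it can stop one of them.
  static void addDataNodes(HBaseTestingUtil util) throws Exception {
    MiniDFSCluster dfs = util.getDFSCluster();
    dfs.startDataNodes(util.getConfiguration(), 2, /* manageDfsDirs */ true,
        /* operation */ null, /* racks */ null);
    dfs.waitActive();
  }
}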
2024-11-11T21:45:41,855 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@36d0b5ff{HTTP/1.1, (http/1.1)}{localhost:43247} 2024-11-11T21:45:41,855 INFO [Time-limited test {}] server.Server(415): Started @116254ms 2024-11-11T21:45:41,856 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T21:45:41,893 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:45:41,896 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:45:41,897 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:45:41,897 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:45:41,897 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T21:45:41,900 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60d7890e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:45:41,900 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2281f2b5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:45:41,917 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data6/current/BP-759734978-172.17.0.2-1731361529491/current, will proceed with Du for space computation calculation, 2024-11-11T21:45:41,917 WARN [Thread-827 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data5/current/BP-759734978-172.17.0.2-1731361529491/current, will proceed with Du for space computation calculation, 2024-11-11T21:45:41,938 WARN [Thread-807 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T21:45:41,940 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfc85a12746a7321d with lease ID 0xaf8e50ebdfce26fa: Processing first storage report for DS-92e28678-41c5-45ad-bd13-8794d63ab511 from datanode DatanodeRegistration(127.0.0.1:41801, datanodeUuid=1531f6c2-7271-4a1e-85b0-f25a54a59b4c, infoPort=35953, infoSecurePort=0, ipcPort=38913, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491) 2024-11-11T21:45:41,940 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfc85a12746a7321d with lease ID 0xaf8e50ebdfce26fa: from storage DS-92e28678-41c5-45ad-bd13-8794d63ab511 node DatanodeRegistration(127.0.0.1:41801, datanodeUuid=1531f6c2-7271-4a1e-85b0-f25a54a59b4c, infoPort=35953, infoSecurePort=0, ipcPort=38913, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:45:41,940 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfc85a12746a7321d with lease ID 0xaf8e50ebdfce26fa: Processing first storage report for DS-ce5d069d-3d8b-469a-9648-2b52c2a3d771 from datanode DatanodeRegistration(127.0.0.1:41801, datanodeUuid=1531f6c2-7271-4a1e-85b0-f25a54a59b4c, infoPort=35953, infoSecurePort=0, ipcPort=38913, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491) 2024-11-11T21:45:41,940 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfc85a12746a7321d with lease ID 0xaf8e50ebdfce26fa: from storage DS-ce5d069d-3d8b-469a-9648-2b52c2a3d771 node DatanodeRegistration(127.0.0.1:41801, datanodeUuid=1531f6c2-7271-4a1e-85b0-f25a54a59b4c, infoPort=35953, infoSecurePort=0, ipcPort=38913, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:45:41,999 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4a3a27bb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/java.io.tmpdir/jetty-localhost-35619-hadoop-hdfs-3_4_1-tests_jar-_-any-13198494362810689958/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:45:41,999 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@712f5f14{HTTP/1.1, (http/1.1)}{localhost:35619} 2024-11-11T21:45:41,999 INFO [Time-limited test {}] server.Server(415): Started @116399ms 2024-11-11T21:45:42,001 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T21:45:42,029 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:45:42,033 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:45:42,034 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:45:42,034 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:45:42,034 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T21:45:42,035 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@fb911ed{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:45:42,035 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@719bbb9b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:45:42,066 WARN [Thread-862 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data7/current/BP-759734978-172.17.0.2-1731361529491/current, will proceed with Du for space computation calculation, 2024-11-11T21:45:42,066 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data8/current/BP-759734978-172.17.0.2-1731361529491/current, will proceed with Du for space computation calculation, 2024-11-11T21:45:42,081 WARN [Thread-842 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T21:45:42,083 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7360ca4d93a65ea2 with lease ID 0xaf8e50ebdfce26fb: Processing first storage report for DS-84414f94-462e-4c95-9d3a-8074c10b0154 from datanode DatanodeRegistration(127.0.0.1:41799, datanodeUuid=277a0515-62fe-427a-b159-64383f7bf702, infoPort=34445, infoSecurePort=0, ipcPort=45101, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491) 2024-11-11T21:45:42,083 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7360ca4d93a65ea2 with lease ID 0xaf8e50ebdfce26fb: from storage DS-84414f94-462e-4c95-9d3a-8074c10b0154 node DatanodeRegistration(127.0.0.1:41799, datanodeUuid=277a0515-62fe-427a-b159-64383f7bf702, infoPort=34445, infoSecurePort=0, ipcPort=45101, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:45:42,083 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7360ca4d93a65ea2 with lease ID 0xaf8e50ebdfce26fb: Processing first storage report for DS-afd8128a-abbb-4e73-a6ce-599277bb5738 from datanode DatanodeRegistration(127.0.0.1:41799, datanodeUuid=277a0515-62fe-427a-b159-64383f7bf702, infoPort=34445, infoSecurePort=0, ipcPort=45101, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491) 2024-11-11T21:45:42,083 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7360ca4d93a65ea2 with lease ID 0xaf8e50ebdfce26fb: from storage DS-afd8128a-abbb-4e73-a6ce-599277bb5738 node DatanodeRegistration(127.0.0.1:41799, datanodeUuid=277a0515-62fe-427a-b159-64383f7bf702, infoPort=34445, infoSecurePort=0, ipcPort=45101, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:45:42,141 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2f0a2519{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/java.io.tmpdir/jetty-localhost-38213-hadoop-hdfs-3_4_1-tests_jar-_-any-13231510143191878987/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:45:42,142 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@68004957{HTTP/1.1, (http/1.1)}{localhost:38213} 2024-11-11T21:45:42,142 INFO [Time-limited test {}] server.Server(415): Started @116541ms 2024-11-11T21:45:42,143 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-11T21:45:42,200 WARN [Thread-888 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data9/current/BP-759734978-172.17.0.2-1731361529491/current, will proceed with Du for space computation calculation, 2024-11-11T21:45:42,200 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data10/current/BP-759734978-172.17.0.2-1731361529491/current, will proceed with Du for space computation calculation, 2024-11-11T21:45:42,216 WARN [Thread-877 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T21:45:42,218 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf6efda1e5b2ddb9f with lease ID 0xaf8e50ebdfce26fc: Processing first storage report for DS-4a83779d-bb66-4083-92bc-6e0534ee2d99 from datanode DatanodeRegistration(127.0.0.1:33611, datanodeUuid=748db9df-3e8d-4ea3-88d5-3839899f8e5b, infoPort=34265, infoSecurePort=0, ipcPort=33329, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491) 2024-11-11T21:45:42,218 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf6efda1e5b2ddb9f with lease ID 0xaf8e50ebdfce26fc: from storage DS-4a83779d-bb66-4083-92bc-6e0534ee2d99 node DatanodeRegistration(127.0.0.1:33611, datanodeUuid=748db9df-3e8d-4ea3-88d5-3839899f8e5b, infoPort=34265, infoSecurePort=0, ipcPort=33329, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:45:42,218 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf6efda1e5b2ddb9f with lease ID 0xaf8e50ebdfce26fc: Processing first storage report for DS-e5f0a67b-14a5-4d57-a5b2-d59975076096 from datanode DatanodeRegistration(127.0.0.1:33611, datanodeUuid=748db9df-3e8d-4ea3-88d5-3839899f8e5b, infoPort=34265, infoSecurePort=0, ipcPort=33329, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491) 2024-11-11T21:45:42,218 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf6efda1e5b2ddb9f with lease ID 0xaf8e50ebdfce26fc: from storage DS-e5f0a67b-14a5-4d57-a5b2-d59975076096 node DatanodeRegistration(127.0.0.1:33611, datanodeUuid=748db9df-3e8d-4ea3-88d5-3839899f8e5b, infoPort=34265, infoSecurePort=0, ipcPort=33329, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:45:42,261 WARN [ResponseProcessor for block BP-759734978-172.17.0.2-1731361529491:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-759734978-172.17.0.2-1731361529491:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:42,262 WARN [ResponseProcessor for block BP-759734978-172.17.0.2-1731361529491:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-759734978-172.17.0.2-1731361529491:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-759734978-172.17.0.2-1731361529491:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:42,261 WARN [ResponseProcessor for block BP-759734978-172.17.0.2-1731361529491:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-759734978-172.17.0.2-1731361529491:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:42,262 WARN [DataStreamer for file /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 block BP-759734978-172.17.0.2-1731361529491:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK], DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]) is bad. 2024-11-11T21:45:42,262 WARN [DataStreamer for file /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 block BP-759734978-172.17.0.2-1731361529491:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK], DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]) is bad. 2024-11-11T21:45:42,262 WARN [DataStreamer for file /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361530953 block BP-759734978-172.17.0.2-1731361529491:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK], DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]) is bad. 
2024-11-11T21:45:42,262 WARN [ResponseProcessor for block BP-759734978-172.17.0.2-1731361529491:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-759734978-172.17.0.2-1731361529491:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-759734978-172.17.0.2-1731361529491:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:42,263 WARN [DataStreamer for file /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta block BP-759734978-172.17.0.2-1731361529491:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK], DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]) is bad. 2024-11-11T21:45:42,262 WARN [PacketResponder: BP-759734978-172.17.0.2-1731361529491:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33285] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:42,263 WARN [PacketResponder: BP-759734978-172.17.0.2-1731361529491:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33285] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] 
at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:42,264 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-561504383_22 at /127.0.0.1:57580 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:39385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57580 dst: /127.0.0.1:39385 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:42,264 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_682195219_22 at /127.0.0.1:34198 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33285:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34198 dst: /127.0.0.1:33285 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:42,265 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:57540 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57540 dst: /127.0.0.1:39385 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:42,265 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_682195219_22 at /127.0.0.1:57516 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57516 dst: /127.0.0.1:39385 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:42,264 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:34214 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33285:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34214 dst: /127.0.0.1:33285 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:42,265 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-561504383_22 at /127.0.0.1:34256 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:33285:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34256 dst: /127.0.0.1:33285 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T21:45:42,265 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:57546 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57546 dst: /127.0.0.1:39385 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:42,264 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:34222 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33285:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34222 dst: /127.0.0.1:33285 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:42,266 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7a9b2a9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:45:42,267 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69bbaec1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:45:42,267 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:45:42,267 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7982676d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:45:42,268 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2606b08f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/hadoop.log.dir/,STOPPED} 2024-11-11T21:45:42,270 WARN [BP-759734978-172.17.0.2-1731361529491 heartbeating to localhost/127.0.0.1:39361 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T21:45:42,270 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T21:45:42,270 WARN [BP-759734978-172.17.0.2-1731361529491 heartbeating to localhost/127.0.0.1:39361 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-759734978-172.17.0.2-1731361529491 (Datanode Uuid 8595ef5f-b595-4e43-8673-b19b1a241670) service to localhost/127.0.0.1:39361 2024-11-11T21:45:42,270 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T21:45:42,271 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data3/current/BP-759734978-172.17.0.2-1731361529491 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:45:42,271 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data4/current/BP-759734978-172.17.0.2-1731361529491 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:45:42,271 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T21:45:42,271 WARN [DataStreamer for file /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 block BP-759734978-172.17.0.2-1731361529491:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T21:45:42,271 WARN [DataStreamer for file /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta block BP-759734978-172.17.0.2-1731361529491:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:42,272 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@7bb95116 {}] datanode.DataXceiver(331): 127.0.0.1:39385:DataXceiver error processing unknown operation src: /127.0.0.1:34150 dst: /127.0.0.1:39385 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:42,272 WARN [DataStreamer for file /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361530953 block BP-759734978-172.17.0.2-1731361529491:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:42,273 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_682195219_22 at /127.0.0.1:34134 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39385:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34134 dst: /127.0.0.1:39385 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:42,280 WARN [ResponseProcessor for block BP-759734978-172.17.0.2-1731361529491:blk_1073741830_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-759734978-172.17.0.2-1731361529491:blk_1073741830_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T21:45:42,281 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5493a194{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:45:42,282 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@257cf4bb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:45:42,282 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:45:42,282 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7183cb8b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:45:42,282 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@764b5008{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/hadoop.log.dir/,STOPPED} 2024-11-11T21:45:42,283 WARN [BP-759734978-172.17.0.2-1731361529491 heartbeating to localhost/127.0.0.1:39361 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T21:45:42,283 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T21:45:42,283 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T21:45:42,283 WARN [BP-759734978-172.17.0.2-1731361529491 heartbeating to localhost/127.0.0.1:39361 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-759734978-172.17.0.2-1731361529491 (Datanode Uuid 231bda69-9879-4159-b0fd-52114e939ab8) service to localhost/127.0.0.1:39361 2024-11-11T21:45:42,284 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data1/current/BP-759734978-172.17.0.2-1731361529491 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:45:42,284 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data2/current/BP-759734978-172.17.0.2-1731361529491 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:45:42,284 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T21:45:42,288 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0., hostname=22a1f13b90b0,33181,1731361530149, seqNum=2] 2024-11-11T21:45:42,290 ERROR [FSHLog-0-hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4-prefix:22a1f13b90b0,33181,1731361530149 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:42,290 WARN [FSHLog-0-hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4-prefix:22a1f13b90b0,33181,1731361530149 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:42,291 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:42,291 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 22a1f13b90b0%2C33181%2C1731361530149:(num 1731361530953) roll requested 2024-11-11T21:45:42,291 INFO [regionserver/22a1f13b90b0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C33181%2C1731361530149.1731361542291 2024-11-11T21:45:42,294 WARN [Thread-903 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:42,294 WARN [Thread-903 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK], DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]) is bad. 2024-11-11T21:45:42,294 WARN [Thread-903 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741838_1018 2024-11-11T21:45:42,297 WARN [Thread-903 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK] 2024-11-11T21:45:42,300 WARN [Thread-903 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1019 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:42,301 WARN [Thread-903 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK], DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]) is bad. 
2024-11-11T21:45:42,301 WARN [Thread-903 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741839_1019 2024-11-11T21:45:42,301 WARN [Thread-903 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK] 2024-11-11T21:45:42,305 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:42,305 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:42,306 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:42,306 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:42,306 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:42,306 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361530953 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361542291 2024-11-11T21:45:42,306 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:42,307 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T21:45:42,308 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35953:35953),(127.0.0.1/127.0.0.1:34445:34445)] 2024-11-11T21:45:42,308 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361530953 is not closed yet, will try archiving it next time 2024-11-11T21:45:42,308 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-11T21:45:42,308 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-11T21:45:42,308 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361530953 2024-11-11T21:45:42,312 WARN [IPC Server handler 2 on default port 39361 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361530953 has not been closed. Lease recovery is in progress. RecoveryId = 1021 for block blk_1073741833_1009 2024-11-11T21:45:42,315 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361530953 after 4ms 2024-11-11T21:45:42,633 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:43,582 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T21:45:44,308 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:44,310 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361542291 2024-11-11T21:45:44,311 WARN [ResponseProcessor for block BP-759734978-172.17.0.2-1731361529491:blk_1073741840_1020 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-759734978-172.17.0.2-1731361529491:blk_1073741840_1020 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:44,313 WARN [DataStreamer for file /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361542291 block BP-759734978-172.17.0.2-1731361529491:blk_1073741840_1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741840_1020 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK], DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK]) is bad. 2024-11-11T21:45:44,314 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:43510 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741840_1020] {}] datanode.DataXceiver(331): 127.0.0.1:41801:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43510 dst: /127.0.0.1:41801 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:44,314 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:57498 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741840_1020] {}] datanode.DataXceiver(331): 127.0.0.1:41799:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57498 dst: /127.0.0.1:41799 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T21:45:44,316 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@daf04c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:45:44,316 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@36d0b5ff{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:45:44,316 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:45:44,317 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e308c17{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:45:44,317 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21cd4c78{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/hadoop.log.dir/,STOPPED} 2024-11-11T21:45:44,319 WARN [BP-759734978-172.17.0.2-1731361529491 heartbeating to localhost/127.0.0.1:39361 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T21:45:44,319 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T21:45:44,319 WARN [BP-759734978-172.17.0.2-1731361529491 heartbeating to localhost/127.0.0.1:39361 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-759734978-172.17.0.2-1731361529491 (Datanode Uuid 1531f6c2-7271-4a1e-85b0-f25a54a59b4c) service to localhost/127.0.0.1:39361 2024-11-11T21:45:44,319 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T21:45:44,319 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data5/current/BP-759734978-172.17.0.2-1731361529491 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:45:44,320 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data6/current/BP-759734978-172.17.0.2-1731361529491 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:45:44,320 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T21:45:44,634 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:45,583 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:46,309 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:46,310 WARN [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]] 2024-11-11T21:45:46,311 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 22a1f13b90b0%2C33181%2C1731361530149:(num 1731361542291) roll requested 2024-11-11T21:45:46,311 INFO [regionserver/22a1f13b90b0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C33181%2C1731361530149.1731361546311 2024-11-11T21:45:46,316 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361530953 after 4007ms 2024-11-11T21:45:46,316 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:46,317 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK], DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK]) is bad. 2024-11-11T21:45:46,317 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741841_1023 2024-11-11T21:45:46,318 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK] 2024-11-11T21:45:46,320 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:46,320 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK], DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]) is bad. 2024-11-11T21:45:46,320 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741842_1024 2024-11-11T21:45:46,321 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK] 2024-11-11T21:45:46,323 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:46,323 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK], DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]) is bad. 2024-11-11T21:45:46,323 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741843_1025 2024-11-11T21:45:46,324 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK] 2024-11-11T21:45:46,327 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T21:45:46,331 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:46,331 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:46,331 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:46,331 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:46,331 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:46,332 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361542291 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361546311 2024-11-11T21:45:46,332 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34445:34445),(127.0.0.1/127.0.0.1:34265:34265)] 2024-11-11T21:45:46,332 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361530953 is not closed yet, will try archiving it next time 2024-11-11T21:45:46,332 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361542291 is not closed yet, will try archiving it next time 2024-11-11T21:45:46,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41799 is added to blk_1073741840_1022 (size=3600) 2024-11-11T21:45:46,634 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:46,736 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361530953 is not closed yet, will try archiving it next time 2024-11-11T21:45:47,583 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:48,095 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7af917b1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41799, datanodeUuid=277a0515-62fe-427a-b159-64383f7bf702, infoPort=34445, infoSecurePort=0, ipcPort=45101, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491):Failed to transfer BP-759734978-172.17.0.2-1731361529491:blk_1073741840_1022 to 127.0.0.1:33285 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:48,331 WARN [ResponseProcessor for block BP-759734978-172.17.0.2-1731361529491:blk_1073741844_1026 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-759734978-172.17.0.2-1731361529491:blk_1073741844_1026 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:48,332 WARN [DataStreamer for file /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361546311 block BP-759734978-172.17.0.2-1731361529491:blk_1073741844_1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741844_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK], DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]) is bad. 2024-11-11T21:45:48,332 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:57510 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741844_1026] {}] datanode.DataXceiver(331): 127.0.0.1:41799:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57510 dst: /127.0.0.1:41799 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T21:45:48,332 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:54000 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741844_1026] {}] datanode.DataXceiver(331): 127.0.0.1:33611:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54000 dst: /127.0.0.1:33611 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:48,333 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:48,333 WARN [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK]] 2024-11-11T21:45:48,333 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 22a1f13b90b0%2C33181%2C1731361530149:(num 1731361546311) roll requested 2024-11-11T21:45:48,333 INFO [regionserver/22a1f13b90b0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C33181%2C1731361530149.1731361548333 2024-11-11T21:45:48,334 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4a3a27bb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:45:48,334 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@712f5f14{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:45:48,334 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:45:48,334 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2281f2b5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:45:48,335 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60d7890e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/hadoop.log.dir/,STOPPED} 2024-11-11T21:45:48,335 WARN [BP-759734978-172.17.0.2-1731361529491 heartbeating to localhost/127.0.0.1:39361 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T21:45:48,335 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T21:45:48,335 WARN [BP-759734978-172.17.0.2-1731361529491 heartbeating to localhost/127.0.0.1:39361 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-759734978-172.17.0.2-1731361529491 (Datanode Uuid 277a0515-62fe-427a-b159-64383f7bf702) service to localhost/127.0.0.1:39361 2024-11-11T21:45:48,335 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T21:45:48,336 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data7/current/BP-759734978-172.17.0.2-1731361529491 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:45:48,336 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data8/current/BP-759734978-172.17.0.2-1731361529491 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:45:48,336 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T21:45:48,339 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:48,339 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK], DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]) is bad. 2024-11-11T21:45:48,339 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741845_1028 2024-11-11T21:45:48,340 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK] 2024-11-11T21:45:48,342 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:48,342 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK], DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]) is bad. 2024-11-11T21:45:48,342 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741846_1029 2024-11-11T21:45:48,343 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK] 2024-11-11T21:45:48,344 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:48,344 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK], DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK]) is bad. 2024-11-11T21:45:48,345 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741847_1030 2024-11-11T21:45:48,345 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK] 2024-11-11T21:45:48,347 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:48,347 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK], DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]) is bad. 2024-11-11T21:45:48,347 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741848_1031 2024-11-11T21:45:48,347 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK] 2024-11-11T21:45:48,348 WARN [IPC Server handler 4 on default port 39361 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T21:45:48,348 WARN [IPC Server handler 4 on default port 39361 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T21:45:48,348 WARN [IPC Server handler 4 on default port 39361 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T21:45:48,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33181 {}] regionserver.HRegion(8855): Flush requested on 8a44818e3874d8ebf6a5ff19dff66dd0 2024-11-11T21:45:48,349 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8a44818e3874d8ebf6a5ff19dff66dd0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T21:45:48,351 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:48,352 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:48,352 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:48,352 INFO [sync.3 
{}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:48,352 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:48,352 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361546311 with entries=6, filesize=6.11 KB; new WAL /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361548333 2024-11-11T21:45:48,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741844_1027 (size=6261) 2024-11-11T21:45:48,361 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34265:34265)] 2024-11-11T21:45:48,362 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361530953 is not closed yet, will try archiving it next time 2024-11-11T21:45:48,362 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361546311 is not closed yet, will try archiving it next time 2024-11-11T21:45:48,371 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/cdddda0098ca48f88b2cbc61c1d1c53a is 1080, key is row0002/info:/1731361544321/Put/seqid=0 2024-11-11T21:45:48,373 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:48,373 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK], DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK]) is bad. 
2024-11-11T21:45:48,373 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741850_1033 2024-11-11T21:45:48,374 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK] 2024-11-11T21:45:48,375 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:48,375 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK], DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]) is bad. 2024-11-11T21:45:48,375 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741851_1034 2024-11-11T21:45:48,376 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK] 2024-11-11T21:45:48,378 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33285 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T21:45:48,378 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:54016 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741852_1035] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data10]'}, localName='127.0.0.1:33611', datanodeUuid='748db9df-3e8d-4ea3-88d5-3839899f8e5b', xmitsInProgress=0}:Exception transferring block BP-759734978-172.17.0.2-1731361529491:blk_1073741852_1035 to mirror 127.0.0.1:33285 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:48,378 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK], DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]) is bad. 2024-11-11T21:45:48,378 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:54016 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741852_1035] {}] datanode.BlockReceiver(316): Block 1073741852 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T21:45:48,378 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741852_1035 2024-11-11T21:45:48,378 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:54016 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741852_1035] {}] datanode.DataXceiver(331): 127.0.0.1:33611:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54016 dst: /127.0.0.1:33611 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:48,379 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK] 2024-11-11T21:45:48,381 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39385 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:48,381 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:54032 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741853_1036] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data10]'}, localName='127.0.0.1:33611', datanodeUuid='748db9df-3e8d-4ea3-88d5-3839899f8e5b', xmitsInProgress=0}:Exception transferring block BP-759734978-172.17.0.2-1731361529491:blk_1073741853_1036 to mirror 127.0.0.1:39385 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T21:45:48,381 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK], DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]) is bad. 2024-11-11T21:45:48,381 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:54032 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741853_1036] {}] datanode.BlockReceiver(316): Block 1073741853 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T21:45:48,381 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741853_1036 2024-11-11T21:45:48,382 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:54032 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741853_1036] {}] datanode.DataXceiver(331): 127.0.0.1:33611:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54032 dst: /127.0.0.1:33611 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T21:45:48,382 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK] 2024-11-11T21:45:48,383 WARN [IPC Server handler 3 on default port 39361 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T21:45:48,383 WARN [IPC Server handler 3 on default port 39361 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T21:45:48,383 WARN [IPC Server handler 3 on default port 39361 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T21:45:48,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741854_1037 (size=10347) 2024-11-11T21:45:48,635 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T21:45:48,756 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361530953 is not closed yet, will try archiving it next time 2024-11-11T21:45:48,788 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/cdddda0098ca48f88b2cbc61c1d1c53a 2024-11-11T21:45:48,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/cdddda0098ca48f88b2cbc61c1d1c53a as hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/cdddda0098ca48f88b2cbc61c1d1c53a 2024-11-11T21:45:48,808 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/cdddda0098ca48f88b2cbc61c1d1c53a, entries=5, sequenceid=11, filesize=10.1 K 2024-11-11T21:45:48,809 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 8a44818e3874d8ebf6a5ff19dff66dd0 in 460ms, sequenceid=11, compaction requested=false 2024-11-11T21:45:48,809 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8a44818e3874d8ebf6a5ff19dff66dd0: 2024-11-11T21:45:48,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33181 {}] regionserver.HRegion(8855): Flush requested on 8a44818e3874d8ebf6a5ff19dff66dd0 2024-11-11T21:45:48,991 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8a44818e3874d8ebf6a5ff19dff66dd0 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-11T21:45:49,001 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/52a1f41aea674791844d1446d31d45d8 is 1080, key is row0007/info:/1731361548350/Put/seqid=0 2024-11-11T21:45:49,003 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:49,003 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK], DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK]) is bad. 2024-11-11T21:45:49,003 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741855_1038 2024-11-11T21:45:49,004 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK] 2024-11-11T21:45:49,005 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:49,005 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK], DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]) is bad. 2024-11-11T21:45:49,005 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741856_1039 2024-11-11T21:45:49,005 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK] 2024-11-11T21:45:49,008 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41799 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:49,008 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53782 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741857_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data10]'}, localName='127.0.0.1:33611', datanodeUuid='748db9df-3e8d-4ea3-88d5-3839899f8e5b', xmitsInProgress=0}:Exception transferring block BP-759734978-172.17.0.2-1731361529491:blk_1073741857_1040 to mirror 127.0.0.1:41799 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:49,008 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK], DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]) is bad. 2024-11-11T21:45:49,008 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741857_1040 2024-11-11T21:45:49,008 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53782 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741857_1040] {}] datanode.BlockReceiver(316): Block 1073741857 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T21:45:49,008 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53782 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741857_1040] {}] datanode.DataXceiver(331): 127.0.0.1:33611:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53782 dst: /127.0.0.1:33611 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:49,008 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK] 2024-11-11T21:45:49,011 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39385 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:49,011 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53788 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data10]'}, localName='127.0.0.1:33611', datanodeUuid='748db9df-3e8d-4ea3-88d5-3839899f8e5b', xmitsInProgress=0}:Exception transferring block BP-759734978-172.17.0.2-1731361529491:blk_1073741858_1041 to mirror 127.0.0.1:39385 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:49,011 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK], DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]) is bad. 2024-11-11T21:45:49,011 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741858_1041 2024-11-11T21:45:49,011 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53788 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T21:45:49,011 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53788 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:33611:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53788 dst: /127.0.0.1:33611 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T21:45:49,012 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK] 2024-11-11T21:45:49,013 WARN [IPC Server handler 0 on default port 39361 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T21:45:49,013 WARN [IPC Server handler 0 on default port 39361 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T21:45:49,013 WARN [IPC Server handler 0 on default port 39361 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T21:45:49,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741859_1042 (size=12506) 2024-11-11T21:45:49,418 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/52a1f41aea674791844d1446d31d45d8 2024-11-11T21:45:49,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/52a1f41aea674791844d1446d31d45d8 as hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/52a1f41aea674791844d1446d31d45d8 2024-11-11T21:45:49,432 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/52a1f41aea674791844d1446d31d45d8, entries=7, sequenceid=24, filesize=12.2 K 2024-11-11T21:45:49,434 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 8a44818e3874d8ebf6a5ff19dff66dd0 in 443ms, sequenceid=24, compaction requested=false 2024-11-11T21:45:49,434 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8a44818e3874d8ebf6a5ff19dff66dd0: 2024-11-11T21:45:49,434 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-11T21:45:49,434 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:45:49,434 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/52a1f41aea674791844d1446d31d45d8 because midkey is the same as first or last row 2024-11-11T21:45:49,584 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:50,362 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:50,362 WARN [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK]] 2024-11-11T21:45:50,362 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 22a1f13b90b0%2C33181%2C1731361530149:(num 1731361548333) roll requested 2024-11-11T21:45:50,363 INFO [regionserver/22a1f13b90b0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C33181%2C1731361530149.1731361550362 2024-11-11T21:45:50,366 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:50,367 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK], DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK]) is bad. 2024-11-11T21:45:50,367 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741860_1043 2024-11-11T21:45:50,367 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK] 2024-11-11T21:45:50,369 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:50,369 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK], DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]) is bad. 2024-11-11T21:45:50,370 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741861_1044 2024-11-11T21:45:50,370 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK] 2024-11-11T21:45:50,373 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39385 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:50,373 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53794 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741862_1045] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data10]'}, localName='127.0.0.1:33611', datanodeUuid='748db9df-3e8d-4ea3-88d5-3839899f8e5b', xmitsInProgress=0}:Exception transferring block BP-759734978-172.17.0.2-1731361529491:blk_1073741862_1045 to mirror 127.0.0.1:39385 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:50,374 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK], DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]) is bad. 2024-11-11T21:45:50,374 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741862_1045 2024-11-11T21:45:50,374 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53794 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741862_1045] {}] datanode.BlockReceiver(316): Block 1073741862 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-11T21:45:50,374 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53794 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741862_1045] {}] datanode.DataXceiver(331): 127.0.0.1:33611:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53794 dst: /127.0.0.1:33611 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:50,374 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK] 2024-11-11T21:45:50,376 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33285 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:50,376 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53802 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data10]'}, localName='127.0.0.1:33611', datanodeUuid='748db9df-3e8d-4ea3-88d5-3839899f8e5b', xmitsInProgress=0}:Exception transferring block BP-759734978-172.17.0.2-1731361529491:blk_1073741863_1046 to mirror 127.0.0.1:33285 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:50,377 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK], DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]) is bad. 2024-11-11T21:45:50,377 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741863_1046 2024-11-11T21:45:50,377 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53802 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-11T21:45:50,377 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53802 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:33611:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53802 dst: /127.0.0.1:33611 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T21:45:50,377 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK] 2024-11-11T21:45:50,378 WARN [IPC Server handler 2 on default port 39361 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T21:45:50,378 WARN [IPC Server handler 2 on default port 39361 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T21:45:50,378 WARN [IPC Server handler 2 on default port 39361 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T21:45:50,381 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:50,381 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:50,381 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:50,381 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:50,381 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:45:50,381 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361548333 with entries=18, filesize=18.21 KB; new WAL /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361550362 2024-11-11T21:45:50,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741849_1032 (size=18655) 2024-11-11T21:45:50,385 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34265:34265)] 2024-11-11T21:45:50,385 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361530953 is not closed yet, will try archiving it next time 2024-11-11T21:45:50,385 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361548333 is not closed yet, will try archiving it next time 2024-11-11T21:45:50,386 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361542291 to hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/oldWALs/22a1f13b90b0%2C33181%2C1731361530149.1731361542291 2024-11-11T21:45:50,387 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361546311 to hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/oldWALs/22a1f13b90b0%2C33181%2C1731361530149.1731361546311 2024-11-11T21:45:50,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33181 {}] regionserver.HRegion(8855): Flush requested on 8a44818e3874d8ebf6a5ff19dff66dd0 2024-11-11T21:45:50,427 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8a44818e3874d8ebf6a5ff19dff66dd0 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-11T21:45:50,431 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/0014941f74ea4daaa7f1e311121546fd is 1079, key is tmprow/info:/1731361550426/Put/seqid=0 2024-11-11T21:45:50,435 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41801 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:50,435 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53818 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data10]'}, localName='127.0.0.1:33611', datanodeUuid='748db9df-3e8d-4ea3-88d5-3839899f8e5b', xmitsInProgress=0}:Exception transferring block BP-759734978-172.17.0.2-1731361529491:blk_1073741865_1048 to mirror 127.0.0.1:41801 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:50,435 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK], DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK]) is bad. 2024-11-11T21:45:50,435 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741865_1048 2024-11-11T21:45:50,435 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53818 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T21:45:50,435 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53818 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:33611:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53818 dst: /127.0.0.1:33611 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:50,436 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK] 2024-11-11T21:45:50,437 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:50,437 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK], DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]) is bad. 2024-11-11T21:45:50,437 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741866_1049 2024-11-11T21:45:50,437 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK] 2024-11-11T21:45:50,439 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:50,439 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK], DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]) is bad. 
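Note (annotation, not log output): the flush requested earlier in this excerpt on region 8a44818e3874d8ebf6a5ff19dff66dd0 is triggered automatically by MemStoreFlusher once the memstore grows past its threshold. For readers following along, the same operation can be requested through the public HBase client API; the sketch below is a minimal, illustrative example only (the connection setup is assumed, not taken from the test code; the table name is copied from this log).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // picks up hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Flush the table's memstores to HFiles, the same operation the
          // MemStoreFlusher thread performs automatically in the log above.
          admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
        }
      }
    }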
2024-11-11T21:45:50,439 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741867_1050 2024-11-11T21:45:50,439 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK] 2024-11-11T21:45:50,442 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53828 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741868_1051] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data10]'}, localName='127.0.0.1:33611', datanodeUuid='748db9df-3e8d-4ea3-88d5-3839899f8e5b', xmitsInProgress=0}:Exception transferring block BP-759734978-172.17.0.2-1731361529491:blk_1073741868_1051 to mirror 127.0.0.1:33285 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:50,442 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33285 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:50,442 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53828 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741868_1051] {}] datanode.BlockReceiver(316): Block 1073741868 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
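Note (annotation, not log output): the repeated "Exception in createBlockOutputStream" / "Abandoning" / "Excluding datanode" entries above show the DFSClient dropping a block whose write pipeline could not be built and excluding the unreachable datanode before asking the NameNode for a new one. How aggressively the client swaps datanodes during an in-flight write is governed by the standard replace-datanode-on-failure client settings; the sketch below only names those keys as an illustration and does not claim to reproduce the test's actual configuration (the replication factor of 2 is inferred from the "still in need of 1 to reach 2" warnings later in this log).

    import org.apache.hadoop.conf.Configuration;

    public class PipelineFailureConfig {
      public static Configuration clientConf() {
        Configuration conf = new Configuration();
        // Files in this run are evidently written with two replicas.
        conf.setInt("dfs.replication", 2);
        // Whether the client tries to replace a failed datanode in an open pipeline at all.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT only replaces nodes for larger pipelines; ALWAYS and NEVER are the other standard values.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        return conf;
      }
    }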
2024-11-11T21:45:50,442 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK], DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]) is bad. 2024-11-11T21:45:50,442 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741868_1051 2024-11-11T21:45:50,442 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53828 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741868_1051] {}] datanode.DataXceiver(331): 127.0.0.1:33611:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53828 dst: /127.0.0.1:33611 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T21:45:50,443 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK] 2024-11-11T21:45:50,443 WARN [IPC Server handler 1 on default port 39361 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T21:45:50,444 WARN [IPC Server handler 1 on default port 39361 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T21:45:50,444 WARN [IPC Server handler 1 on default port 39361 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T21:45:50,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741869_1052 (size=6027) 2024-11-11T21:45:50,635 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
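Note (annotation, not log output): the BlockPlacementPolicyDefault warnings above mean the NameNode could not find a second DISK-backed datanode for a new block, and the FSHLog entry shows the WAL writer aborting because every datanode in its pipeline is unreachable. One way to confirm how many datanodes the NameNode currently counts as live is the datanode report API; the snippet below is a hedged sketch using the standard DistributedFileSystem call, with the localhost:39361 NameNode address read off the paths in this log.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

    public class LiveDatanodeReport {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:39361"), conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // Nodes the NameNode currently considers live; with only one left,
          // a replication factor of 2 can never be satisfied.
          for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.LIVE)) {
            System.out.println(dn.getXferAddr() + " live, " + dn.getRemaining() + " bytes remaining");
          }
        }
      }
    }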
2024-11-11T21:45:50,785 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361530953 is not closed yet, will try archiving it next time 2024-11-11T21:45:50,848 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/0014941f74ea4daaa7f1e311121546fd 2024-11-11T21:45:50,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/0014941f74ea4daaa7f1e311121546fd as hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/0014941f74ea4daaa7f1e311121546fd 2024-11-11T21:45:50,868 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/0014941f74ea4daaa7f1e311121546fd, entries=1, sequenceid=34, filesize=5.9 K 2024-11-11T21:45:50,869 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 8a44818e3874d8ebf6a5ff19dff66dd0 in 442ms, sequenceid=34, compaction requested=true 2024-11-11T21:45:50,869 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8a44818e3874d8ebf6a5ff19dff66dd0: 2024-11-11T21:45:50,870 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-11T21:45:50,870 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:45:50,870 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/52a1f41aea674791844d1446d31d45d8 because midkey is the same as first or last row 2024-11-11T21:45:50,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a44818e3874d8ebf6a5ff19dff66dd0:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T21:45:50,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:45:50,870 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T21:45:50,872 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T21:45:50,872 DEBUG 
[RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.HStore(1541): 8a44818e3874d8ebf6a5ff19dff66dd0/info is initiating minor compaction (all files) 2024-11-11T21:45:50,872 INFO [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8a44818e3874d8ebf6a5ff19dff66dd0/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0. 2024-11-11T21:45:50,872 INFO [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/cdddda0098ca48f88b2cbc61c1d1c53a, hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/52a1f41aea674791844d1446d31d45d8, hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/0014941f74ea4daaa7f1e311121546fd] into tmpdir=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp, totalSize=28.2 K 2024-11-11T21:45:50,873 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] compactions.Compactor(225): Compacting cdddda0098ca48f88b2cbc61c1d1c53a, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731361544321 2024-11-11T21:45:50,873 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] compactions.Compactor(225): Compacting 52a1f41aea674791844d1446d31d45d8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731361548350 2024-11-11T21:45:50,874 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0014941f74ea4daaa7f1e311121546fd, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731361550426 2024-11-11T21:45:50,889 INFO [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a44818e3874d8ebf6a5ff19dff66dd0#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T21:45:50,890 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/cec4fd8750eb4f8aa911b82ac9ea46d7 is 1080, key is row0002/info:/1731361544321/Put/seqid=0 2024-11-11T21:45:50,892 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:50,892 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK], DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]) is bad. 2024-11-11T21:45:50,892 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741870_1053 2024-11-11T21:45:50,892 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK] 2024-11-11T21:45:50,894 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:50,894 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK], DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]) is bad. 2024-11-11T21:45:50,894 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741871_1054 2024-11-11T21:45:50,894 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK] 2024-11-11T21:45:50,897 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39385 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:50,897 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53882 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data10]'}, localName='127.0.0.1:33611', datanodeUuid='748db9df-3e8d-4ea3-88d5-3839899f8e5b', xmitsInProgress=0}:Exception transferring block BP-759734978-172.17.0.2-1731361529491:blk_1073741872_1055 to mirror 127.0.0.1:39385 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:50,897 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK], DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]) is bad. 2024-11-11T21:45:50,897 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741872_1055 2024-11-11T21:45:50,897 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53882 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T21:45:50,897 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53882 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:33611:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53882 dst: /127.0.0.1:33611 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:50,897 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK] 2024-11-11T21:45:50,899 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:50,899 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK], DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK]) is bad. 
2024-11-11T21:45:50,899 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741873_1056 2024-11-11T21:45:50,900 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK] 2024-11-11T21:45:50,900 WARN [IPC Server handler 2 on default port 39361 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T21:45:50,900 WARN [IPC Server handler 2 on default port 39361 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T21:45:50,901 WARN [IPC Server handler 2 on default port 39361 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T21:45:50,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741874_1057 (size=17994) 2024-11-11T21:45:51,223 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1b660931[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33611, datanodeUuid=748db9df-3e8d-4ea3-88d5-3839899f8e5b, infoPort=34265, infoSecurePort=0, ipcPort=33329, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491):Failed to transfer BP-759734978-172.17.0.2-1731361529491:blk_1073741844_1027 to 127.0.0.1:33285 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
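Note (annotation, not log output): the DataNode$DataTransfer "Failed to transfer" warnings above come from NameNode-scheduled re-replication; the surviving datanode at 127.0.0.1:33611 is being told to copy under-replicated blocks to peers that no longer accept connections. To see which datanodes actually hold a file's blocks after such failures, the standard block-location API can be used; the path below is copied from the flush committed earlier in this log, and running it this way is purely an illustration, not something the test itself does.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class BlockLocationCheck {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:39361"), new Configuration());
        Path hfile = new Path("/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/"
            + "data/default/TestLogRolling-testLogRollOnDatanodeDeath/"
            + "8a44818e3874d8ebf6a5ff19dff66dd0/info/0014941f74ea4daaa7f1e311121546fd");
        FileStatus status = fs.getFileStatus(hfile);
        // Target replication vs. the hosts that actually serve each block right now.
        System.out.println("target replication = " + status.getReplication());
        for (BlockLocation loc : fs.getFileBlockLocations(status, 0, status.getLen())) {
          System.out.println(loc.getOffset() + " -> " + String.join(",", loc.getHosts()));
        }
        fs.close();
      }
    }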
2024-11-11T21:45:51,224 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@15a1f42d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33611, datanodeUuid=748db9df-3e8d-4ea3-88d5-3839899f8e5b, infoPort=34265, infoSecurePort=0, ipcPort=33329, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491):Failed to transfer BP-759734978-172.17.0.2-1731361529491:blk_1073741854_1037 to 127.0.0.1:41799 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:51,316 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/cec4fd8750eb4f8aa911b82ac9ea46d7 as hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/cec4fd8750eb4f8aa911b82ac9ea46d7 2024-11-11T21:45:51,324 INFO [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8a44818e3874d8ebf6a5ff19dff66dd0/info of 8a44818e3874d8ebf6a5ff19dff66dd0 into cec4fd8750eb4f8aa911b82ac9ea46d7(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
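Note (annotation, not log output): the compaction entries above show ExploringCompactionPolicy selecting all three flushed HFiles (28.2 K total) and the short-compactions thread rewriting them into a single 17.6 K store file, after which the split policy declines to split because the midkey equals the first or last row. A compaction can also be requested explicitly through the Admin API; the sketch below is illustrative only and uses the table name from this log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionExample {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
          admin.compact(table);       // queue a minor compaction, like the one selected above
          admin.majorCompact(table);  // or rewrite every store file of the table in one pass
        }
      }
    }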
2024-11-11T21:45:51,324 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8a44818e3874d8ebf6a5ff19dff66dd0: 2024-11-11T21:45:51,324 INFO [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0., storeName=8a44818e3874d8ebf6a5ff19dff66dd0/info, priority=13, startTime=1731361550870; duration=0sec 2024-11-11T21:45:51,324 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-11T21:45:51,324 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:45:51,324 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/cec4fd8750eb4f8aa911b82ac9ea46d7 because midkey is the same as first or last row 2024-11-11T21:45:51,324 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-11T21:45:51,324 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:45:51,324 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/cec4fd8750eb4f8aa911b82ac9ea46d7 because midkey is the same as first or last row 2024-11-11T21:45:51,325 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-11T21:45:51,325 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:45:51,325 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/cec4fd8750eb4f8aa911b82ac9ea46d7 because midkey is the same as first or last row 2024-11-11T21:45:51,325 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:45:51,325 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a44818e3874d8ebf6a5ff19dff66dd0:info 2024-11-11T21:45:51,584 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:51,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33181 {}] regionserver.HRegion(8855): Flush requested on 8a44818e3874d8ebf6a5ff19dff66dd0 2024-11-11T21:45:51,856 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8a44818e3874d8ebf6a5ff19dff66dd0 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-11T21:45:51,864 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/86441932b84f4fc6823730c60a58b81a is 1079, key is tmprow/info:/1731361551854/Put/seqid=0 2024-11-11T21:45:51,866 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:51,866 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK], DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]) is bad. 2024-11-11T21:45:51,866 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741875_1058 2024-11-11T21:45:51,867 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK] 2024-11-11T21:45:51,869 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:51,869 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK], DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK]) is bad. 2024-11-11T21:45:51,869 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741876_1059 2024-11-11T21:45:51,870 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK] 2024-11-11T21:45:51,872 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33285 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:51,872 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53908 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741877_1060] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data10]'}, localName='127.0.0.1:33611', datanodeUuid='748db9df-3e8d-4ea3-88d5-3839899f8e5b', xmitsInProgress=0}:Exception transferring block BP-759734978-172.17.0.2-1731361529491:blk_1073741877_1060 to mirror 127.0.0.1:33285 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:51,872 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK], DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK]) is bad. 2024-11-11T21:45:51,872 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741877_1060 2024-11-11T21:45:51,872 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53908 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741877_1060] {}] datanode.BlockReceiver(316): Block 1073741877 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T21:45:51,872 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53908 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741877_1060] {}] datanode.DataXceiver(331): 127.0.0.1:33611:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53908 dst: /127.0.0.1:33611 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:51,873 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33285,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK] 2024-11-11T21:45:51,875 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41799 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:51,875 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53924 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741878_1061] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data10]'}, localName='127.0.0.1:33611', datanodeUuid='748db9df-3e8d-4ea3-88d5-3839899f8e5b', xmitsInProgress=0}:Exception transferring block BP-759734978-172.17.0.2-1731361529491:blk_1073741878_1061 to mirror 127.0.0.1:41799 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:51,875 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK], DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]) is bad. 2024-11-11T21:45:51,875 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741878_1061 2024-11-11T21:45:51,875 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53924 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741878_1061] {}] datanode.BlockReceiver(316): Block 1073741878 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T21:45:51,875 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:53924 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741878_1061] {}] datanode.DataXceiver(331): 127.0.0.1:33611:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53924 dst: /127.0.0.1:33611 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:51,876 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK] 2024-11-11T21:45:51,877 WARN [IPC Server handler 0 on default port 39361 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T21:45:51,877 WARN [IPC Server handler 0 on default port 39361 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T21:45:51,877 WARN [IPC Server handler 0 on default port 39361 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T21:45:51,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741879_1062 (size=6027) 2024-11-11T21:45:52,221 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1b660931[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33611, datanodeUuid=748db9df-3e8d-4ea3-88d5-3839899f8e5b, infoPort=34265, infoSecurePort=0, ipcPort=33329, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491):Failed to transfer BP-759734978-172.17.0.2-1731361529491:blk_1073741849_1032 to 127.0.0.1:33285 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:52,221 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@15a1f42d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33611, datanodeUuid=748db9df-3e8d-4ea3-88d5-3839899f8e5b, infoPort=34265, infoSecurePort=0, ipcPort=33329, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491):Failed to transfer BP-759734978-172.17.0.2-1731361529491:blk_1073741859_1042 to 127.0.0.1:41799 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
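Note (annotation, not log output): this excerpt is from TestLogRolling's testLogRollOnDatanodeDeath, so the connection-refused ports (41801, 41799, 33285, 39385) are presumably datanodes the test has taken down on purpose while writes are in flight. The sketch below is a hedged, stand-alone illustration of that general pattern against Hadoop's MiniDFSCluster test harness; it is an assumption about the technique, not a copy of the HBase test code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class DatanodeDeathSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        try {
          cluster.waitActive();
          FSDataOutputStream out = cluster.getFileSystem().create(new Path("/wal-like-file"));
          out.write(new byte[1024]);
          out.hflush();                 // force a write pipeline to be established
          cluster.stopDataNode(0);      // kill one datanode while the stream is still open
          out.write(new byte[1024]);    // the client must now recover the pipeline, as in the log above
          out.close();
        } finally {
          cluster.shutdown();
        }
      }
    }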
2024-11-11T21:45:52,281 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/86441932b84f4fc6823730c60a58b81a 2024-11-11T21:45:52,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/86441932b84f4fc6823730c60a58b81a as hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/86441932b84f4fc6823730c60a58b81a 2024-11-11T21:45:52,305 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/86441932b84f4fc6823730c60a58b81a, entries=1, sequenceid=45, filesize=5.9 K 2024-11-11T21:45:52,306 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 8a44818e3874d8ebf6a5ff19dff66dd0 in 451ms, sequenceid=45, compaction requested=false 2024-11-11T21:45:52,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8a44818e3874d8ebf6a5ff19dff66dd0: 2024-11-11T21:45:52,306 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-11T21:45:52,306 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:45:52,306 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/cec4fd8750eb4f8aa911b82ac9ea46d7 because midkey is the same as first or last row 2024-11-11T21:45:52,386 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:52,386 WARN [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-11T21:45:52,470 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:45:52,473 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:45:52,473 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:45:52,474 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:45:52,474 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T21:45:52,474 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@12f49c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:45:52,474 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2234d719{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:45:52,569 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1929003e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/java.io.tmpdir/jetty-localhost-46631-hadoop-hdfs-3_4_1-tests_jar-_-any-11479811395766481036/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:45:52,570 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@16a2580d{HTTP/1.1, (http/1.1)}{localhost:46631} 2024-11-11T21:45:52,570 INFO [Time-limited test {}] server.Server(415): Started @126969ms 2024-11-11T21:45:52,571 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T21:45:52,636 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:52,651 WARN [Thread-982 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T21:45:52,661 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1ea39817212a6a66 with lease ID 0xaf8e50ebdfce26fd: from storage DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf node DatanodeRegistration(127.0.0.1:46829, datanodeUuid=8595ef5f-b595-4e43-8673-b19b1a241670, infoPort=37959, infoSecurePort=0, ipcPort=37271, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491), blocks: 6, hasStaleStorage: false, processing time: 3 msecs, invalidatedBlocks: 0 2024-11-11T21:45:52,662 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1ea39817212a6a66 with lease ID 0xaf8e50ebdfce26fd: from storage DS-82714e5a-99b8-4b1a-84ff-85a0cf488710 node DatanodeRegistration(127.0.0.1:46829, datanodeUuid=8595ef5f-b595-4e43-8673-b19b1a241670, infoPort=37959, infoSecurePort=0, ipcPort=37271, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:45:53,585 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:54,224 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@15a1f42d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33611, datanodeUuid=748db9df-3e8d-4ea3-88d5-3839899f8e5b, infoPort=34265, infoSecurePort=0, ipcPort=33329, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491):Failed to transfer BP-759734978-172.17.0.2-1731361529491:blk_1073741874_1057 to 127.0.0.1:41801 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T21:45:54,224 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1b660931[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33611, datanodeUuid=748db9df-3e8d-4ea3-88d5-3839899f8e5b, infoPort=34265, infoSecurePort=0, ipcPort=33329, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491):Failed to transfer BP-759734978-172.17.0.2-1731361529491:blk_1073741869_1052 to 127.0.0.1:41799 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:45:54,386 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:54,637 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:55,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46829 is added to blk_1073741879_1062 (size=6027) 2024-11-11T21:45:55,585 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:56,387 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:56,637 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:57,586 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:58,387 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:58,638 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:45:59,587 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:00,073 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T21:46:00,388 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:00,638 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:00,733 ERROR [FSHLog-0-hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData-prefix:22a1f13b90b0,42845,1731361530096 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:00,733 WARN [FSHLog-0-hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData-prefix:22a1f13b90b0,42845,1731361530096 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:00,733 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 22a1f13b90b0%2C42845%2C1731361530096:(num 1731361530659) roll requested 2024-11-11T21:46:00,734 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C42845%2C1731361530096.1731361560734 2024-11-11T21:46:00,738 WARN [Thread-1003 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
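The sequence just above (appendAndSync throws, "append entry failed", "roll requested", then a new WAL file name) is the writer-rolling reaction to a dead pipeline. The following is a schematic sketch of that pattern against the public FileSystem API, not HBase's actual FSHLog code: when hflush() on the current output stream fails, the caller abandons that file and creates a fresh one so a new pipeline is chosen. The /tmp/wal path and MAX_ROLLS bound are assumptions for illustration.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RollOnAppendFailure {
        private static final int MAX_ROLLS = 3; // assumed bound, not an HBase setting

        public static void main(String[] args) throws IOException {
            FileSystem fs = FileSystem.get(new Configuration());
            FSDataOutputStream out = fs.create(new Path("/tmp/wal." + System.currentTimeMillis()));
            byte[] entry = "edit".getBytes();
            for (int roll = 0; roll < MAX_ROLLS; roll++) {
                try {
                    out.write(entry);
                    out.hflush();          // sync the pipeline; throws if all datanodes are bad
                    break;                 // append + sync succeeded
                } catch (IOException e) {
                    // Mirrors "append entry failed ... roll requested": drop the old writer,
                    // then open a new file so a fresh datanode set can be picked.
                    try { out.close(); } catch (IOException ignored) { /* "close old writer failed" */ }
                    out = fs.create(new Path("/tmp/wal." + System.currentTimeMillis()));
                }
            }
            out.close();
        }
    }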
2024-11-11T21:46:00,738 WARN [Thread-1003 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK], DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]) is bad. 2024-11-11T21:46:00,738 WARN [Thread-1003 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741880_1063 2024-11-11T21:46:00,738 WARN [Thread-1003 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK] 2024-11-11T21:46:00,740 WARN [Thread-1003 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:00,740 WARN [Thread-1003 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK], DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK]) is bad. 2024-11-11T21:46:00,740 WARN [Thread-1003 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741881_1064 2024-11-11T21:46:00,740 WARN [Thread-1003 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41801,DS-92e28678-41c5-45ad-bd13-8794d63ab511,DISK] 2024-11-11T21:46:00,744 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:00,744 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:00,744 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:00,745 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:00,745 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:00,745 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361560734 2024-11-11T21:46:00,745 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:00,745 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:00,745 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 2024-11-11T21:46:00,746 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34265:34265),(127.0.0.1/127.0.0.1:37959:37959)] 2024-11-11T21:46:00,746 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 is not closed yet, will try archiving it next time 2024-11-11T21:46:00,746 WARN [IPC Server handler 3 on default port 39361 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 has not been closed. Lease recovery is in progress. RecoveryId = 1066 for block blk_1073741830_1014 2024-11-11T21:46:00,746 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 after 1ms 2024-11-11T21:46:01,587 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:02,388 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:02,675 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7c94f19b {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-759734978-172.17.0.2-1731361529491:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:39385,null,null]) java.net.ConnectException: Call From 22a1f13b90b0/172.17.0.2 to localhost:42557 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-11T21:46:02,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46829 is added to blk_1073741833_1021 (size=455) 2024-11-11T21:46:03,347 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361530953 to hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/oldWALs/22a1f13b90b0%2C33181%2C1731361530149.1731361530953 2024-11-11T21:46:03,350 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361548333 to hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/oldWALs/22a1f13b90b0%2C33181%2C1731361530149.1731361548333 2024-11-11T21:46:03,588 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T21:46:03,658 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3fa4f64f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46829, datanodeUuid=8595ef5f-b595-4e43-8673-b19b1a241670, infoPort=37959, infoSecurePort=0, ipcPort=37271, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491):Failed to transfer BP-759734978-172.17.0.2-1731361529491:blk_1073741833_1021 to 127.0.0.1:41801 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:04,389 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:04,749 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 after 4004ms 2024-11-11T21:46:05,588 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:06,389 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:06,658 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4da26ee0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46829, datanodeUuid=8595ef5f-b595-4e43-8673-b19b1a241670, infoPort=37959, infoSecurePort=0, ipcPort=37271, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491):Failed to transfer BP-759734978-172.17.0.2-1731361529491:blk_1073741832_1008 to 127.0.0.1:41799 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:06,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741836_1012 (size=76) 2024-11-11T21:46:07,588 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
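The RecoverLeaseFSUtils lines above ("Failed to recover lease, attempt=0 ... after 1ms", then "attempt=1 ... after 4004ms") show the old master WAL being reclaimed by polling lease recovery. A hedged sketch of that polling loop using the public DistributedFileSystem.recoverLease call; the cluster address, WAL path, 4-second pause and attempt cap are assumptions that only loosely match the intervals visible in the log, not HBase's actual tuning.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoveryRetry {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Assumed HDFS address and WAL path, standing in for the file named in the log.
            DistributedFileSystem dfs =
                (DistributedFileSystem) new Path("hdfs://localhost:8020/").getFileSystem(conf);
            Path wal = new Path("/example/MasterData/WALs/old-wal-file");

            long pauseMs = 4000L;   // roughly the gap between attempt=0 and attempt=1 above
            int maxAttempts = 15;   // assumed cap
            for (int attempt = 0; attempt < maxAttempts; attempt++) {
                boolean recovered = dfs.recoverLease(wal);  // true once the file is closed
                if (recovered) {
                    System.out.println("lease recovered on attempt=" + attempt);
                    return;
                }
                System.out.println("Failed to recover lease, attempt=" + attempt);
                Thread.sleep(pauseMs);
            }
            throw new IOException("lease not recovered after " + maxAttempts + " attempts");
        }
    }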
2024-11-11T21:46:07,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741826_1002 (size=42) 2024-11-11T21:46:07,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741828_1004 (size=1189) 2024-11-11T21:46:08,122 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C33181%2C1731361530149.1731361568121 2024-11-11T21:46:08,135 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:08,135 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:08,135 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:08,135 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:08,135 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:08,136 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361550362 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361568121 2024-11-11T21:46:08,137 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37959:37959),(127.0.0.1/127.0.0.1:34265:34265)] 2024-11-11T21:46:08,137 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361550362 is not closed yet, will try archiving it next time 2024-11-11T21:46:08,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741864_1047 (size=13591) 2024-11-11T21:46:08,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33181 {}] regionserver.HRegion(8855): Flush requested on 8a44818e3874d8ebf6a5ff19dff66dd0 2024-11-11T21:46:08,148 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8a44818e3874d8ebf6a5ff19dff66dd0 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-11T21:46:08,153 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/25bb4f44e6f5440cac84903b17266214 is 1080, key is row0013/info:/1731361568138/Put/seqid=0 2024-11-11T21:46:08,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741884_1068 (size=11421) 2024-11-11T21:46:08,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46829 is added to blk_1073741884_1068 (size=11421) 2024-11-11T21:46:08,159 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/25bb4f44e6f5440cac84903b17266214 2024-11-11T21:46:08,166 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/25bb4f44e6f5440cac84903b17266214 as hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/25bb4f44e6f5440cac84903b17266214 2024-11-11T21:46:08,172 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/25bb4f44e6f5440cac84903b17266214, entries=6, sequenceid=55, filesize=11.2 K 2024-11-11T21:46:08,174 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for 8a44818e3874d8ebf6a5ff19dff66dd0 in 26ms, sequenceid=55, compaction requested=true 2024-11-11T21:46:08,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8a44818e3874d8ebf6a5ff19dff66dd0: 2024-11-11T21:46:08,174 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-11-11T21:46:08,174 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:46:08,174 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/cec4fd8750eb4f8aa911b82ac9ea46d7 because midkey is the same as first or last row 2024-11-11T21:46:08,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a44818e3874d8ebf6a5ff19dff66dd0:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T21:46:08,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:46:08,174 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T21:46:08,176 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T21:46:08,176 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.HStore(1541): 8a44818e3874d8ebf6a5ff19dff66dd0/info is initiating minor compaction (all files) 2024-11-11T21:46:08,176 INFO [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8a44818e3874d8ebf6a5ff19dff66dd0/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0. 
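The "Committing .../.tmp/info/<file> as .../info/<file>" lines above show flush output being written under a temporary directory and then moved into the store directory in a single step, so readers only ever see complete files. A minimal sketch of that write-then-rename commit pattern with the public FileSystem API; the paths are placeholders, not the actual test-data layout.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CommitTmpFile {
        public static void main(String[] args) throws IOException {
            FileSystem fs = FileSystem.get(new Configuration());
            Path tmp = new Path("/example/region/.tmp/info/flushfile");   // placeholder path
            Path dst = new Path("/example/region/info/flushfile");        // placeholder path

            // 1. Write the new store file somewhere readers never look.
            try (FSDataOutputStream out = fs.create(tmp)) {
                out.writeBytes("cell data");
            }
            // 2. Move the finished file into place; the rename is the "commit" step.
            fs.mkdirs(dst.getParent());
            if (!fs.rename(tmp, dst)) {
                throw new IOException("commit failed: " + tmp + " -> " + dst);
            }
        }
    }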
2024-11-11T21:46:08,176 INFO [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/cec4fd8750eb4f8aa911b82ac9ea46d7, hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/86441932b84f4fc6823730c60a58b81a, hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/25bb4f44e6f5440cac84903b17266214] into tmpdir=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp, totalSize=34.6 K 2024-11-11T21:46:08,177 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] compactions.Compactor(225): Compacting cec4fd8750eb4f8aa911b82ac9ea46d7, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731361544321 2024-11-11T21:46:08,177 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] compactions.Compactor(225): Compacting 86441932b84f4fc6823730c60a58b81a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731361551854 2024-11-11T21:46:08,178 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] compactions.Compactor(225): Compacting 25bb4f44e6f5440cac84903b17266214, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731361552260 2024-11-11T21:46:08,196 INFO [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a44818e3874d8ebf6a5ff19dff66dd0#info#compaction#24 average throughput is 8.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T21:46:08,197 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/facb237e5aa641fd8a98e58cbdab0d95 is 1080, key is row0002/info:/1731361544321/Put/seqid=0 2024-11-11T21:46:08,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46829 is added to blk_1073741885_1069 (size=23502) 2024-11-11T21:46:08,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741885_1069 (size=23502) 2024-11-11T21:46:08,209 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/facb237e5aa641fd8a98e58cbdab0d95 as hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/facb237e5aa641fd8a98e58cbdab0d95 2024-11-11T21:46:08,217 INFO [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8a44818e3874d8ebf6a5ff19dff66dd0/info of 8a44818e3874d8ebf6a5ff19dff66dd0 into facb237e5aa641fd8a98e58cbdab0d95(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T21:46:08,217 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8a44818e3874d8ebf6a5ff19dff66dd0: 2024-11-11T21:46:08,217 INFO [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0., storeName=8a44818e3874d8ebf6a5ff19dff66dd0/info, priority=13, startTime=1731361568174; duration=0sec 2024-11-11T21:46:08,217 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-11T21:46:08,217 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:46:08,217 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/facb237e5aa641fd8a98e58cbdab0d95 because midkey is the same as first or last row 2024-11-11T21:46:08,217 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-11T21:46:08,217 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:46:08,218 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/facb237e5aa641fd8a98e58cbdab0d95 because midkey is the same as first or last row 2024-11-11T21:46:08,218 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-11T21:46:08,218 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:46:08,218 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/facb237e5aa641fd8a98e58cbdab0d95 because midkey is the same as first or last row 2024-11-11T21:46:08,218 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:46:08,218 DEBUG [RS:0;22a1f13b90b0:33181-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a44818e3874d8ebf6a5ff19dff66dd0:info 2024-11-11T21:46:08,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33181 {}] regionserver.HRegion(8855): Flush requested on 8a44818e3874d8ebf6a5ff19dff66dd0 2024-11-11T21:46:08,370 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8a44818e3874d8ebf6a5ff19dff66dd0 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-11T21:46:08,376 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/6a2e45225c4048c992a52fbf17c938c5 is 1080, key is row0018/info:/1731361568149/Put/seqid=0 2024-11-11T21:46:08,379 WARN [Thread-1036 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41799 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
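The split-policy pairs above ("Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K" followed by "cannot split ... because midkey is the same as first or last row") boil down to two checks: total store size against a threshold, and whether a usable midkey exists. A plain-Java sketch of that decision using the sizes from the log; it mirrors only the two logged checks, not the full HBase split-policy code, and the row keys below are generic placeholders.

    import java.util.Arrays;

    public class SplitCheckSketch {
        /** Splittable only if the store is big enough AND the midkey differs from both end rows. */
        static boolean shouldSplit(long[] storeFileBytes, long sizeToCheck,
                                   String midKey, String firstRow, String lastRow) {
            long sumSize = Arrays.stream(storeFileBytes).sum();
            boolean bigEnough = sumSize > sizeToCheck;                       // "region size is big enough"
            boolean usableMidKey = !midKey.equals(firstRow) && !midKey.equals(lastRow);
            return bigEnough && usableMidKey;                                // otherwise "cannot split"
        }

        public static void main(String[] args) {
            // 23502 bytes is the single file left after compacting 17.6K + 5.9K + 11.2K above.
            long[] files = {23502L};
            long sizeToCheck = 16L * 1024;   // the 16.0 K threshold seen in the log
            // A midkey equal to the first row reproduces the "cannot split" outcome.
            System.out.println(shouldSplit(files, sizeToCheck, "rowA", "rowA", "rowZ"));
        }
    }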
2024-11-11T21:46:08,379 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:48080 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741886_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data10]'}, localName='127.0.0.1:33611', datanodeUuid='748db9df-3e8d-4ea3-88d5-3839899f8e5b', xmitsInProgress=0}:Exception transferring block BP-759734978-172.17.0.2-1731361529491:blk_1073741886_1070 to mirror 127.0.0.1:41799 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:08,379 WARN [Thread-1036 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741886_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33611,DS-4a83779d-bb66-4083-92bc-6e0534ee2d99,DISK], DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]) is bad. 2024-11-11T21:46:08,379 WARN [Thread-1036 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741886_1070 2024-11-11T21:46:08,379 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:48080 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741886_1070] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T21:46:08,379 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:48080 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741886_1070] {}] datanode.DataXceiver(331): 127.0.0.1:33611:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48080 dst: /127.0.0.1:33611 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:08,380 WARN [Thread-1036 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK] 2024-11-11T21:46:08,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741887_1071 (size=11421) 2024-11-11T21:46:08,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46829 is added to blk_1073741887_1071 (size=11421) 2024-11-11T21:46:08,387 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/6a2e45225c4048c992a52fbf17c938c5 2024-11-11T21:46:08,390 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:08,390 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 
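The createBlockOutputStream failure, "Excluding datanode", and "All datanodes ... are bad. Aborting..." sequence above is the standard HDFS write-pipeline reaction to the datanode this test kills (127.0.0.1:41799): the client abandons the block, retries without the bad node, and once no healthy replicas remain the stream aborts, which is what lets the low-replication WAL roller kick in. How hard the DFS client tries to replace failed pipeline nodes is governed by the replace-datanode-on-failure client settings; the sketch below shows those keys with illustrative values, not necessarily what this test configures.

```java
import org.apache.hadoop.conf.Configuration;

public class PipelineRecoveryConfig {
  /** Illustrative DFS client settings for tolerating pipeline datanode failures. */
  public static Configuration dfsClientConf() {
    Configuration conf = new Configuration();
    // Allow the client to try replacing a failed pipeline node...
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // ...and keep writing on the surviving replicas if replacement fails.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}
```

With best-effort enabled, a failed replacement lets the writer continue on the surviving replicas instead of throwing, which is usually preferable for a WAL until it can be rolled onto a fresh pipeline.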
2024-11-11T21:46:08,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/.tmp/info/6a2e45225c4048c992a52fbf17c938c5 as hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/6a2e45225c4048c992a52fbf17c938c5 2024-11-11T21:46:08,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/6a2e45225c4048c992a52fbf17c938c5, entries=6, sequenceid=66, filesize=11.2 K 2024-11-11T21:46:08,407 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 8a44818e3874d8ebf6a5ff19dff66dd0 in 37ms, sequenceid=66, compaction requested=false 2024-11-11T21:46:08,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8a44818e3874d8ebf6a5ff19dff66dd0: 2024-11-11T21:46:08,407 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.1 K, sizeToCheck=16.0 K 2024-11-11T21:46:08,407 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:46:08,407 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/facb237e5aa641fd8a98e58cbdab0d95 because midkey is the same as first or last row 2024-11-11T21:46:08,539 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.1731361550362 to hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/oldWALs/22a1f13b90b0%2C33181%2C1731361530149.1731361550362 2024-11-11T21:46:08,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T21:46:08,572 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-11T21:46:08,572 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:46:08,572 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:46:08,573 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:46:08,573 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
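The call stack above is simply the JUnit teardown path: AbstractTestLogRolling.tearDown() has HBaseTestingUtil shut the mini cluster down, which closes the shared async connection and stops its RPC client. A minimal sketch of that lifecycle, assuming a two-region-server cluster like the one in this log (the field name and setup arguments are hypothetical, not the test's actual code):

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterLifecycleSketch {
  // Hypothetical field; the real test class manages its own utility instance.
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    util.startMiniCluster(2); // two region servers, like RS:0 and RS:1 in this log
  }

  @After
  public void tearDown() throws Exception {
    // Produces the "Shutting down minicluster" / connection-close lines seen above.
    util.shutdownMiniCluster();
  }
}
```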
2024-11-11T21:46:08,574 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T21:46:08,574 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1912091167, stopped=false 2024-11-11T21:46:08,574 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=22a1f13b90b0,42845,1731361530096 2024-11-11T21:46:08,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37551-0x10030bfbd4a0002, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T21:46:08,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33181-0x10030bfbd4a0001, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T21:46:08,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37551-0x10030bfbd4a0002, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:08,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33181-0x10030bfbd4a0001, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:08,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T21:46:08,578 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T21:46:08,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:08,579 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33181-0x10030bfbd4a0001, quorum=127.0.0.1:64033, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:46:08,579 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-11T21:46:08,579 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:46:08,579 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:46:08,579 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:46:08,579 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37551-0x10030bfbd4a0002, quorum=127.0.0.1:64033, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:46:08,580 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '22a1f13b90b0,33181,1731361530149' ***** 2024-11-11T21:46:08,580 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T21:46:08,580 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '22a1f13b90b0,37551,1731361531521' ***** 2024-11-11T21:46:08,580 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T21:46:08,580 INFO [RS:1;22a1f13b90b0:37551 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T21:46:08,580 INFO [RS:1;22a1f13b90b0:37551 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T21:46:08,580 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T21:46:08,580 INFO [RS:1;22a1f13b90b0:37551 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T21:46:08,580 INFO [RS:1;22a1f13b90b0:37551 {}] regionserver.HRegionServer(959): stopping server 22a1f13b90b0,37551,1731361531521 2024-11-11T21:46:08,581 INFO [RS:1;22a1f13b90b0:37551 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T21:46:08,581 INFO [RS:1;22a1f13b90b0:37551 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;22a1f13b90b0:37551. 2024-11-11T21:46:08,581 DEBUG [RS:1;22a1f13b90b0:37551 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:46:08,581 DEBUG [RS:1;22a1f13b90b0:37551 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:46:08,581 INFO [RS:1;22a1f13b90b0:37551 {}] regionserver.HRegionServer(976): stopping server 22a1f13b90b0,37551,1731361531521; all regions closed. 
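Both region servers print the "***** STOPPING region server ..." banner because the master's shutdown request fans out to every live server. RS:1 hosts no regions, so it reaches "all regions closed" immediately, while RS:0 still has hbase:meta and the test region to close. Outside a mini cluster the same per-server stop can be issued through the Admin API; a brief sketch with a placeholder host:port (the log's servers are 22a1f13b90b0:33181 and 22a1f13b90b0:37551):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class StopRegionServerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Placeholder host:port; triggers the same STOPPING banner on that server.
      admin.stopRegionServer("example-host:16020");
    }
  }
}
```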
2024-11-11T21:46:08,581 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T21:46:08,581 INFO [RS:0;22a1f13b90b0:33181 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T21:46:08,581 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T21:46:08,581 INFO [RS:0;22a1f13b90b0:33181 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T21:46:08,581 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.HRegionServer(3091): Received CLOSE for 8a44818e3874d8ebf6a5ff19dff66dd0 2024-11-11T21:46:08,581 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.HRegionServer(959): stopping server 22a1f13b90b0,33181,1731361530149 2024-11-11T21:46:08,581 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:08,582 INFO [RS:0;22a1f13b90b0:33181 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T21:46:08,582 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:08,582 INFO [RS:0;22a1f13b90b0:33181 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;22a1f13b90b0:33181. 2024-11-11T21:46:08,582 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:08,582 DEBUG [RS:0;22a1f13b90b0:33181 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:46:08,582 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8a44818e3874d8ebf6a5ff19dff66dd0, disabling compactions & flushes 2024-11-11T21:46:08,582 DEBUG [RS:0;22a1f13b90b0:33181 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:46:08,582 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:08,582 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0. 
2024-11-11T21:46:08,582 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0. 2024-11-11T21:46:08,582 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:08,582 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T21:46:08,582 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T21:46:08,582 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0. after waiting 0 ms 2024-11-11T21:46:08,582 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T21:46:08,582 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0. 2024-11-11T21:46:08,582 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T21:46:08,582 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:08,582 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-11T21:46:08,582 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
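"Failed to write trailer, non-fatal" and "close old writer failed" mean the old WAL's HDFS pipeline is already dead, so the writer cannot be closed cleanly; the file is left to lease recovery instead, which is why these are warnings rather than aborts. The roll that the logRoller performs automatically here can also be requested explicitly; a sketch using the Admin API (connection settings are illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask every live region server to roll its WAL, the same operation the
      // logRoller thread performs automatically in the lines above.
      for (ServerName sn : admin.getRegionServers()) {
        admin.rollWALWriter(sn);
      }
    }
  }
}
```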
2024-11-11T21:46:08,583 DEBUG [RS:0;22a1f13b90b0:33181 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 8a44818e3874d8ebf6a5ff19dff66dd0=TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0.} 2024-11-11T21:46:08,583 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T21:46:08,583 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 2024-11-11T21:46:08,583 DEBUG [RS:0;22a1f13b90b0:33181 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8a44818e3874d8ebf6a5ff19dff66dd0 2024-11-11T21:46:08,583 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T21:46:08,583 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T21:46:08,583 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/cdddda0098ca48f88b2cbc61c1d1c53a, hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/52a1f41aea674791844d1446d31d45d8, hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/cec4fd8750eb4f8aa911b82ac9ea46d7, hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/0014941f74ea4daaa7f1e311121546fd, hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/86441932b84f4fc6823730c60a58b81a, hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/25bb4f44e6f5440cac84903b17266214] to archive 2024-11-11T21:46:08,583 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T21:46:08,583 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T21:46:08,583 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-11T21:46:08,583 WARN [IPC Server handler 1 on default port 39361 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File 
/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 has not been closed. Lease recovery is in progress. RecoveryId = 1072 for block blk_1073741837_1013 2024-11-11T21:46:08,583 ERROR [FSHLog-0-hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4-prefix:22a1f13b90b0,33181,1731361530149.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:08,583 WARN [FSHLog-0-hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4-prefix:22a1f13b90b0,33181,1731361530149.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:08,584 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 22a1f13b90b0%2C33181%2C1731361530149.meta:.meta(num 1731361531348) roll requested 2024-11-11T21:46:08,584 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 after 1ms 2024-11-11T21:46:08,584 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
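RecoverLeaseFSUtils is asking the NameNode to take back the lease on the dead writer's WAL so the file can be finalized and read; the "Lease recovery is in progress. RecoveryId = 1072" warning and the "Failed to recover lease, attempt=0 ... after 1ms" line are the expected first round of that loop. Under the hood this reduces to DistributedFileSystem.recoverLease(); the helper below is a simplified single attempt, not HBase's actual retrying utility.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  /** Single attempt, no retries; returns true once the NameNode has closed the file. */
  public static boolean tryRecoverLease(Configuration conf, Path walFile) throws Exception {
    FileSystem fs = walFile.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // nothing to recover on a local filesystem
    }
    // While block recovery is still running, the NameNode logs the
    // "Lease recovery is in progress" warning seen above and this returns false.
    return ((DistributedFileSystem) fs).recoverLease(walFile);
  }
}
```

HBase's utility keeps retrying with backoff until recoverLease() returns true, which normally happens once the NameNode finishes block recovery for the open block.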
2024-11-11T21:46:08,584 INFO [regionserver/22a1f13b90b0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C33181%2C1731361530149.meta.1731361568584.meta 2024-11-11T21:46:08,586 INFO [regionserver/22a1f13b90b0:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T21:46:08,586 INFO [regionserver/22a1f13b90b0:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T21:46:08,586 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/cdddda0098ca48f88b2cbc61c1d1c53a to hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/cdddda0098ca48f88b2cbc61c1d1c53a 2024-11-11T21:46:08,587 WARN [Thread-1043 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1073 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41799 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:08,587 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:43202 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741888_1073] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data4]'}, localName='127.0.0.1:46829', datanodeUuid='8595ef5f-b595-4e43-8673-b19b1a241670', xmitsInProgress=0}:Exception transferring block BP-759734978-172.17.0.2-1731361529491:blk_1073741888_1073 to mirror 127.0.0.1:41799 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:08,587 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/52a1f41aea674791844d1446d31d45d8 to hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/52a1f41aea674791844d1446d31d45d8 2024-11-11T21:46:08,588 WARN [Thread-1043 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741888_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46829,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]) is bad. 2024-11-11T21:46:08,588 WARN [Thread-1043 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741888_1073 2024-11-11T21:46:08,588 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:43202 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741888_1073] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-11T21:46:08,588 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:43202 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741888_1073] {}] datanode.DataXceiver(331): 127.0.0.1:46829:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43202 dst: /127.0.0.1:46829 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T21:46:08,588 WARN [Thread-1043 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK] 2024-11-11T21:46:08,589 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/cec4fd8750eb4f8aa911b82ac9ea46d7 to hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/cec4fd8750eb4f8aa911b82ac9ea46d7 2024-11-11T21:46:08,590 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/0014941f74ea4daaa7f1e311121546fd to hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/0014941f74ea4daaa7f1e311121546fd 2024-11-11T21:46:08,592 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/86441932b84f4fc6823730c60a58b81a to hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/86441932b84f4fc6823730c60a58b81a 2024-11-11T21:46:08,593 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:08,593 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:08,593 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:08,593 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:08,593 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:08,594 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361568584.meta 2024-11-11T21:46:08,594 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/25bb4f44e6f5440cac84903b17266214 to 
hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/info/25bb4f44e6f5440cac84903b17266214 2024-11-11T21:46:08,594 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:08,594 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39385,DS-9a7f8314-16c4-47f7-8ff1-c6f627e738d8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:08,594 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta 2024-11-11T21:46:08,594 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37959:37959),(127.0.0.1/127.0.0.1:34265:34265)] 2024-11-11T21:46:08,594 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta is not closed yet, will try archiving it next time 2024-11-11T21:46:08,594 WARN [IPC Server handler 1 on default port 39361 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta has not been closed. Lease recovery is in progress. RecoveryId = 1075 for block blk_1073741834_1010 2024-11-11T21:46:08,594 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=22a1f13b90b0:42845 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-11T21:46:08,595 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta after 1ms 2024-11-11T21:46:08,595 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [cdddda0098ca48f88b2cbc61c1d1c53a=10347, 52a1f41aea674791844d1446d31d45d8=12506, cec4fd8750eb4f8aa911b82ac9ea46d7=17994, 0014941f74ea4daaa7f1e311121546fd=6027, 86441932b84f4fc6823730c60a58b81a=6027, 25bb4f44e6f5440cac84903b17266214=11421] 2024-11-11T21:46:08,606 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8a44818e3874d8ebf6a5ff19dff66dd0/recovered.edits/69.seqid, newMaxSeqId=69, maxSeqId=1 2024-11-11T21:46:08,606 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0. 2024-11-11T21:46:08,607 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8a44818e3874d8ebf6a5ff19dff66dd0: Waiting for close lock at 1731361568582Running coprocessor pre-close hooks at 1731361568582Disabling compacts and flushes for region at 1731361568582Disabling writes for close at 1731361568582Writing region close event to WAL at 1731361568597 (+15 ms)Running coprocessor post-close hooks at 1731361568606 (+9 ms)Closed at 1731361568606 2024-11-11T21:46:08,607 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0. 
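The StoppedRpcClientException above is benign: the region server is already shutting down, so the quota report about the archived files cannot reach the master and is simply logged as retryable. The HFileArchiver moves themselves (a few entries earlier) are essentially renames from the table's data tree into the parallel archive tree; a rough sketch of that path mapping with a hypothetical helper, not HFileArchiver's real API:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {
  /** Maps <root>/data/... to <root>/archive/data/..., as in the HFileArchiver lines above. */
  public static Path archivePathFor(Path rootDir, Path storeFile) {
    String root = rootDir.toUri().getPath();
    String file = storeFile.toUri().getPath();
    String relative = file.substring(root.length() + 1); // e.g. data/default/T/<region>/info/<hfile>
    return new Path(new Path(rootDir, "archive"), relative);
  }

  public static boolean archive(Configuration conf, Path rootDir, Path storeFile) throws Exception {
    FileSystem fs = rootDir.getFileSystem(conf);
    Path target = archivePathFor(rootDir, storeFile);
    fs.mkdirs(target.getParent());
    // The real HFileArchiver also handles name collisions and cleans up on failure.
    return fs.rename(storeFile, target);
  }
}
```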
2024-11-11T21:46:08,620 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740/.tmp/info/5edbbab900524904a17b13b546351989 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731361531624.8a44818e3874d8ebf6a5ff19dff66dd0./info:regioninfo/1731361532027/Put/seqid=0 2024-11-11T21:46:08,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741890_1076 (size=7089) 2024-11-11T21:46:08,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46829 is added to blk_1073741890_1076 (size=7089) 2024-11-11T21:46:08,625 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740/.tmp/info/5edbbab900524904a17b13b546351989 2024-11-11T21:46:08,645 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740/.tmp/ns/fdf9e2d2e12d404081aa06451c633fae is 43, key is default/ns:d/1731361531416/Put/seqid=0 2024-11-11T21:46:08,648 WARN [Thread-1057 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1077 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41799 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:08,648 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:43238 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741891_1077] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data4]'}, localName='127.0.0.1:46829', datanodeUuid='8595ef5f-b595-4e43-8673-b19b1a241670', xmitsInProgress=0}:Exception transferring block BP-759734978-172.17.0.2-1731361529491:blk_1073741891_1077 to mirror 127.0.0.1:41799 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:08,648 WARN [Thread-1057 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741891_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46829,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]) is bad. 2024-11-11T21:46:08,648 WARN [Thread-1057 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741891_1077 2024-11-11T21:46:08,648 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:43238 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741891_1077] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T21:46:08,648 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:43238 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741891_1077] {}] datanode.DataXceiver(331): 127.0.0.1:46829:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43238 dst: /127.0.0.1:46829 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T21:46:08,649 WARN [Thread-1057 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK] 2024-11-11T21:46:08,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741892_1078 (size=5153) 2024-11-11T21:46:08,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46829 is added to blk_1073741892_1078 (size=5153) 2024-11-11T21:46:08,653 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740/.tmp/ns/fdf9e2d2e12d404081aa06451c633fae 2024-11-11T21:46:08,674 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740/.tmp/table/bdedec2c132640f3bfbb91340d9475b1 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731361532040/Put/seqid=0 2024-11-11T21:46:08,677 WARN [Thread-1066 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1079 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41799 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:08,677 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:43262 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741893_1079] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data4]'}, localName='127.0.0.1:46829', datanodeUuid='8595ef5f-b595-4e43-8673-b19b1a241670', xmitsInProgress=0}:Exception transferring block BP-759734978-172.17.0.2-1731361529491:blk_1073741893_1079 to mirror 127.0.0.1:41799 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:08,677 WARN [Thread-1066 {}] hdfs.DataStreamer(1731): Error Recovery for BP-759734978-172.17.0.2-1731361529491:blk_1073741893_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46829,DS-59c584b2-97b1-4424-93bb-b9dc9cf1a5bf,DISK], DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK]) is bad. 2024-11-11T21:46:08,677 WARN [Thread-1066 {}] hdfs.DataStreamer(1850): Abandoning BP-759734978-172.17.0.2-1731361529491:blk_1073741893_1079 2024-11-11T21:46:08,677 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:43262 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741893_1079] {}] datanode.BlockReceiver(316): Block 1073741893 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-11T21:46:08,677 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1357740717_22 at /127.0.0.1:43262 [Receiving block BP-759734978-172.17.0.2-1731361529491:blk_1073741893_1079] {}] datanode.DataXceiver(331): 127.0.0.1:46829:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43262 dst: /127.0.0.1:46829 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
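The DataStreamer WARN/ERROR entries around this point trace the usual HDFS write-pipeline recovery shape: the connection to the mirror datanode 127.0.0.1:41799 is refused, the client marks that node as bad, abandons the current block, and (as the following "Excluding datanode" entry confirms) leaves the node out of the next pipeline it requests. The sketch below is a minimal, self-contained illustration of that exclude-and-retry pattern; the class and method names are hypothetical and it is not the real DFSClient/DataStreamer API.

import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class PipelineRecoverySketch {

    // Simulates the pipeline-setup ack: returns the first unreachable node
    // ("firstBadLink" in the entries above), or null when every node connects.
    static String firstBadLink(List<String> pipeline, Set<String> downNodes) {
        for (String dn : pipeline) {
            if (downNodes.contains(dn)) {
                return dn;
            }
        }
        return null;
    }

    // Abandon-and-exclude loop: drop the bad node and ask for a fresh pipeline.
    static List<String> writeBlockWithRecovery(List<String> candidates, Set<String> downNodes)
            throws IOException {
        Set<String> excluded = new LinkedHashSet<>();
        while (true) {
            List<String> pipeline = new ArrayList<>();
            for (String dn : candidates) {
                if (!excluded.contains(dn)) {
                    pipeline.add(dn);
                }
            }
            if (pipeline.isEmpty()) {
                throw new IOException("no usable datanodes left; aborting the write");
            }
            String bad = firstBadLink(pipeline, downNodes);
            if (bad == null) {
                return pipeline; // pipeline established; the block can be written
            }
            // Mirrors the "datanode ... is bad" / "Abandoning ..." / "Excluding datanode ..." sequence.
            System.out.println("WARN datanode " + bad + " is bad; abandoning block and excluding it");
            excluded.add(bad);
        }
    }

    public static void main(String[] args) throws IOException {
        List<String> candidates = List.of("127.0.0.1:46829", "127.0.0.1:41799", "127.0.0.1:33611");
        Set<String> down = Set.of("127.0.0.1:41799"); // the datanode the test has stopped
        System.out.println("pipeline used: " + writeBlockWithRecovery(candidates, down));
    }
}

Run as-is, it prints the pipeline that succeeds once the refused datanode has been excluded, which is the same end state the log reaches for blocks blk_1073741891 and blk_1073741893.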
2024-11-11T21:46:08,678 WARN [Thread-1066 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41799,DS-84414f94-462e-4c95-9d3a-8074c10b0154,DISK] 2024-11-11T21:46:08,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46829 is added to blk_1073741894_1080 (size=5424) 2024-11-11T21:46:08,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741894_1080 (size=5424) 2024-11-11T21:46:08,683 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740/.tmp/table/bdedec2c132640f3bfbb91340d9475b1 2024-11-11T21:46:08,690 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740/.tmp/info/5edbbab900524904a17b13b546351989 as hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740/info/5edbbab900524904a17b13b546351989 2024-11-11T21:46:08,697 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740/info/5edbbab900524904a17b13b546351989, entries=10, sequenceid=11, filesize=6.9 K 2024-11-11T21:46:08,698 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740/.tmp/ns/fdf9e2d2e12d404081aa06451c633fae as hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740/ns/fdf9e2d2e12d404081aa06451c633fae 2024-11-11T21:46:08,704 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740/ns/fdf9e2d2e12d404081aa06451c633fae, entries=2, sequenceid=11, filesize=5.0 K 2024-11-11T21:46:08,705 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740/.tmp/table/bdedec2c132640f3bfbb91340d9475b1 as hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740/table/bdedec2c132640f3bfbb91340d9475b1 2024-11-11T21:46:08,711 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740/table/bdedec2c132640f3bfbb91340d9475b1, entries=2, sequenceid=11, filesize=5.3 K 2024-11-11T21:46:08,713 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 130ms, sequenceid=11, 
compaction requested=false 2024-11-11T21:46:08,718 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-11T21:46:08,718 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T21:46:08,719 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T21:46:08,719 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731361568583Running coprocessor pre-close hooks at 1731361568583Disabling compacts and flushes for region at 1731361568583Disabling writes for close at 1731361568583Obtaining lock to block concurrent updates at 1731361568583Preparing flush snapshotting stores in 1588230740 at 1731361568583Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731361568583Flushing stores of hbase:meta,,1.1588230740 at 1731361568595 (+12 ms)Flushing 1588230740/info: creating writer at 1731361568595Flushing 1588230740/info: appending metadata at 1731361568620 (+25 ms)Flushing 1588230740/info: closing flushed file at 1731361568620Flushing 1588230740/ns: creating writer at 1731361568630 (+10 ms)Flushing 1588230740/ns: appending metadata at 1731361568645 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731361568645Flushing 1588230740/table: creating writer at 1731361568659 (+14 ms)Flushing 1588230740/table: appending metadata at 1731361568673 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731361568673Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d8e94b2: reopening flushed file at 1731361568689 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@71aef548: reopening flushed file at 1731361568697 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20ff6cd1: reopening flushed file at 1731361568704 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 130ms, sequenceid=11, compaction requested=false at 1731361568713 (+9 ms)Writing region close event to WAL at 1731361568714 (+1 ms)Running coprocessor post-close hooks at 1731361568718 (+4 ms)Closed at 1731361568719 (+1 ms) 2024-11-11T21:46:08,719 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T21:46:08,783 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.HRegionServer(976): stopping server 22a1f13b90b0,33181,1731361530149; all regions closed. 
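The "Region close journal" entry above packs the whole close sequence for 1588230740 into one line of "<phase> at <epoch millis>" segments, with "(+N ms)" marking the time taken by each step. When reading such entries it can help to split them back into a timeline; the small parser below handles only the format visible here (the regex and class name are mine, not an HBase utility).

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class CloseJournalParser {

    // One journal segment: "<phase> at <13-digit epoch millis>", optionally "(+<delta> ms)".
    private static final Pattern SEGMENT =
            Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

    public static void main(String[] args) {
        // Shortened excerpt of the close journal for 1588230740 logged above.
        String journal = "Waiting for close lock at 1731361568583"
                + "Running coprocessor pre-close hooks at 1731361568583"
                + "Disabling writes for close at 1731361568583"
                + "Flushing stores of hbase:meta,,1.1588230740 at 1731361568595 (+12 ms)"
                + "Writing region close event to WAL at 1731361568714 (+1 ms)"
                + "Closed at 1731361568719 (+1 ms)";

        Matcher m = SEGMENT.matcher(journal);
        long start = -1;
        while (m.find()) {
            long ts = Long.parseLong(m.group(2));
            if (start < 0) {
                start = ts;
            }
            String step = m.group(3) == null ? "0" : m.group(3);
            // Offset from the start of the close, plus the step time the journal recorded.
            System.out.printf("+%4d ms (step %s ms)  %s%n", ts - start, step, m.group(1).trim());
        }
    }
}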
2024-11-11T21:46:08,784 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:08,784 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:08,784 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:08,785 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:08,785 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:08,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741889_1074 (size=825) 2024-11-11T21:46:08,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46829 is added to blk_1073741889_1074 (size=825) 2024-11-11T21:46:08,805 INFO [regionserver/22a1f13b90b0:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T21:46:08,806 INFO [regionserver/22a1f13b90b0:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T21:46:08,808 INFO [regionserver/22a1f13b90b0:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T21:46:09,226 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@15a1f42d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33611, datanodeUuid=748db9df-3e8d-4ea3-88d5-3839899f8e5b, infoPort=34265, infoSecurePort=0, ipcPort=33329, storageInfo=lv=-57;cid=testClusterID;nsid=751949495;c=1731361529491):Failed to transfer BP-759734978-172.17.0.2-1731361529491:blk_1073741864_1047 to 127.0.0.1:41799 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:09,584 INFO [regionserver/22a1f13b90b0:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T21:46:09,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741829_1005 (size=34) 2024-11-11T21:46:09,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741827_1003 (size=196) 2024-11-11T21:46:10,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741825_1001 (size=7) 2024-11-11T21:46:11,437 INFO [master/22a1f13b90b0:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-11T21:46:11,437 INFO [master/22a1f13b90b0:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-11T21:46:11,463 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-11T21:46:11,464 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T21:46:11,464 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T21:46:12,586 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 after 4003ms 2024-11-11T21:46:12,597 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta after 4002ms 2024-11-11T21:46:12,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741835_1011 (size=393) 2024-11-11T21:46:12,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741831_1007 (size=1321) 2024-11-11T21:46:13,583 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-11T21:46:13,587 DEBUG [RS:1;22a1f13b90b0:37551 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/oldWALs 2024-11-11T21:46:13,587 INFO [RS:1;22a1f13b90b0:37551 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 22a1f13b90b0%2C37551%2C1731361531521:(num 1731361531725) 2024-11-11T21:46:13,587 DEBUG [RS:1;22a1f13b90b0:37551 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:46:13,587 INFO [RS:1;22a1f13b90b0:37551 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T21:46:13,587 INFO [RS:1;22a1f13b90b0:37551 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T21:46:13,588 INFO [RS:1;22a1f13b90b0:37551 {}] hbase.ChoreService(370): Chore service for: regionserver/22a1f13b90b0:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-11T21:46:13,588 INFO [RS:1;22a1f13b90b0:37551 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T21:46:13,588 INFO [RS:1;22a1f13b90b0:37551 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T21:46:13,588 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T21:46:13,588 INFO [RS:1;22a1f13b90b0:37551 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
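The Close-WAL-Writer entries above show a lease-recovery loop logging "Failed to recover lease, attempt=N on file=... after Xms" while the shutdown side waits only a bounded, configurable time (the WAL-Shutdown entry names hbase.wal.fshlog.wait.on.shutdown.seconds). Below is a minimal sketch of that bounded retry shape; the isFileClosed-style probe is passed in as a plain BooleanSupplier rather than the real HDFS call, so everything here is illustrative.

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

public class LeaseRecoverySketch {

    // Probe until the file reports closed or the configured shutdown wait runs out.
    static boolean recoverLease(String file, BooleanSupplier isClosed,
                                long waitSeconds, long retryIntervalMillis) throws InterruptedException {
        long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(waitSeconds);
        long start = System.currentTimeMillis();
        int attempt = 0;
        while (System.nanoTime() < deadline) {
            attempt++;
            if (isClosed.getAsBoolean()) {
                return true; // recovered: the previous writer/lease is gone
            }
            System.out.printf("INFO Failed to recover lease, attempt=%d on file=%s after %dms%n",
                    attempt, file, System.currentTimeMillis() - start);
            Thread.sleep(retryIntervalMillis);
        }
        System.out.println("ERROR waited " + waitSeconds
                + " seconds but the close did not complete; check the filesystem or raise the wait");
        return false;
    }

    public static void main(String[] args) throws InterruptedException {
        int[] probes = {0};
        BooleanSupplier closedOnThirdProbe = () -> ++probes[0] >= 3; // stand-in for an HDFS isFileClosed call
        recoverLease("hdfs://localhost/WALs/example-wal", closedOnThirdProbe, 5, 200);
    }
}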
2024-11-11T21:46:13,588 INFO [RS:1;22a1f13b90b0:37551 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T21:46:13,588 INFO [RS:1;22a1f13b90b0:37551 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37551 2024-11-11T21:46:13,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37551-0x10030bfbd4a0002, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/22a1f13b90b0,37551,1731361531521 2024-11-11T21:46:13,591 INFO [RS:1;22a1f13b90b0:37551 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T21:46:13,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T21:46:13,593 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [22a1f13b90b0,37551,1731361531521] 2024-11-11T21:46:13,594 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/22a1f13b90b0,37551,1731361531521 already deleted, retry=false 2024-11-11T21:46:13,595 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 22a1f13b90b0,37551,1731361531521 expired; onlineServers=1 2024-11-11T21:46:13,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:13,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:13,623 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:13,623 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:13,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:13,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:13,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:13,631 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:13,631 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:13,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37551-0x10030bfbd4a0002, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:46:13,693 INFO [RS:1;22a1f13b90b0:37551 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T21:46:13,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37551-0x10030bfbd4a0002, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:46:13,693 INFO [RS:1;22a1f13b90b0:37551 {}] regionserver.HRegionServer(1031): Exiting; stopping=22a1f13b90b0,37551,1731361531521; zookeeper connection closed. 2024-11-11T21:46:13,693 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1f1d7dd7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1f1d7dd7 2024-11-11T21:46:13,786 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-11T21:46:13,792 DEBUG [RS:0;22a1f13b90b0:33181 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/oldWALs 2024-11-11T21:46:13,792 INFO [RS:0;22a1f13b90b0:33181 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 22a1f13b90b0%2C33181%2C1731361530149.meta:.meta(num 1731361568584) 2024-11-11T21:46:13,792 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:13,793 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:13,793 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:13,793 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:13,794 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:13,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741883_1067 (size=16308) 2024-11-11T21:46:13,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46829 is added to blk_1073741883_1067 (size=16308) 2024-11-11T21:46:13,801 DEBUG [RS:0;22a1f13b90b0:33181 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/oldWALs 2024-11-11T21:46:13,801 INFO [RS:0;22a1f13b90b0:33181 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 22a1f13b90b0%2C33181%2C1731361530149:(num 1731361568121) 2024-11-11T21:46:13,801 DEBUG [RS:0;22a1f13b90b0:33181 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:46:13,801 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T21:46:13,801 INFO [RS:0;22a1f13b90b0:33181 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T21:46:13,801 INFO [RS:0;22a1f13b90b0:33181 {}] hbase.ChoreService(370): Chore service for: regionserver/22a1f13b90b0:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T21:46:13,801 INFO [RS:0;22a1f13b90b0:33181 {}] 
hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T21:46:13,801 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T21:46:13,801 INFO [RS:0;22a1f13b90b0:33181 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33181 2024-11-11T21:46:13,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T21:46:13,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33181-0x10030bfbd4a0001, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/22a1f13b90b0,33181,1731361530149 2024-11-11T21:46:13,803 INFO [RS:0;22a1f13b90b0:33181 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T21:46:13,804 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [22a1f13b90b0,33181,1731361530149] 2024-11-11T21:46:13,805 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/22a1f13b90b0,33181,1731361530149 already deleted, retry=false 2024-11-11T21:46:13,805 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 22a1f13b90b0,33181,1731361530149 expired; onlineServers=0 2024-11-11T21:46:13,805 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '22a1f13b90b0,42845,1731361530096' ***** 2024-11-11T21:46:13,805 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T21:46:13,805 INFO [M:0;22a1f13b90b0:42845 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T21:46:13,805 INFO [M:0;22a1f13b90b0:42845 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T21:46:13,805 DEBUG [M:0;22a1f13b90b0:42845 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T21:46:13,805 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
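The ZKWatcher and RegionServerTracker entries above follow the standard ZooKeeper liveness pattern: each region server holds an ephemeral znode under /hbase/rs, and a NodeDeleted event on that znode is treated as server expiration. Below is a bare-bones watcher showing that mechanism with the stock ZooKeeper client; the quorum address and znode path are placeholders, and this is not HBase's RegionServerTracker.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralNodeWatchSketch {

    public static void main(String[] args) throws Exception {
        String quorum = "127.0.0.1:2181";                 // example quorum
        String serverZnode = "/hbase/rs/example-server";  // example ephemeral node

        ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> { });
        Watcher watcher = new Watcher() {
            @Override
            public void process(WatchedEvent event) {
                if (event.getType() == Event.EventType.NodeDeleted) {
                    // The server's session ended and its ephemeral node vanished: treat as expired.
                    System.out.println("ephemeral node deleted, processing expiration [" + event.getPath() + "]");
                } else {
                    // Other event types: re-arm the one-shot watch if the node still exists.
                    try {
                        zk.exists(serverZnode, this);
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        };
        // Set a one-shot watch on the node; it fires when the node is deleted.
        zk.exists(serverZnode, watcher);

        Thread.sleep(60_000); // keep the process alive long enough to observe an event
        zk.close();
    }
}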
2024-11-11T21:46:13,805 DEBUG [M:0;22a1f13b90b0:42845 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T21:46:13,805 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361530732 {}] cleaner.HFileCleaner(306): Exit Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361530732,5,FailOnTimeoutGroup] 2024-11-11T21:46:13,805 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361530732 {}] cleaner.HFileCleaner(306): Exit Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361530732,5,FailOnTimeoutGroup] 2024-11-11T21:46:13,806 INFO [M:0;22a1f13b90b0:42845 {}] hbase.ChoreService(370): Chore service for: master/22a1f13b90b0:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T21:46:13,806 INFO [M:0;22a1f13b90b0:42845 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T21:46:13,806 DEBUG [M:0;22a1f13b90b0:42845 {}] master.HMaster(1795): Stopping service threads 2024-11-11T21:46:13,806 INFO [M:0;22a1f13b90b0:42845 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T21:46:13,806 INFO [M:0;22a1f13b90b0:42845 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T21:46:13,806 INFO [M:0;22a1f13b90b0:42845 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T21:46:13,806 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T21:46:13,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T21:46:13,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:13,807 DEBUG [M:0;22a1f13b90b0:42845 {}] zookeeper.ZKUtil(347): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T21:46:13,807 WARN [M:0;22a1f13b90b0:42845 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T21:46:13,807 INFO [M:0;22a1f13b90b0:42845 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/.lastflushedseqids 2024-11-11T21:46:13,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741895_1081 (size=130) 2024-11-11T21:46:13,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46829 is added to blk_1073741895_1081 (size=130) 2024-11-11T21:46:13,816 INFO [M:0;22a1f13b90b0:42845 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T21:46:13,816 INFO [M:0;22a1f13b90b0:42845 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T21:46:13,817 DEBUG [M:0;22a1f13b90b0:42845 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T21:46:13,817 INFO [M:0;22a1f13b90b0:42845 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:46:13,817 DEBUG [M:0;22a1f13b90b0:42845 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:46:13,817 DEBUG [M:0;22a1f13b90b0:42845 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T21:46:13,817 DEBUG [M:0;22a1f13b90b0:42845 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:46:13,817 INFO [M:0;22a1f13b90b0:42845 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-11-11T21:46:13,833 DEBUG [M:0;22a1f13b90b0:42845 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dc554f32dad44f96bed400d82984d001 is 82, key is hbase:meta,,1/info:regioninfo/1731361531396/Put/seqid=0 2024-11-11T21:46:13,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741896_1082 (size=5672) 2024-11-11T21:46:13,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46829 is added to blk_1073741896_1082 (size=5672) 2024-11-11T21:46:13,839 INFO [M:0;22a1f13b90b0:42845 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dc554f32dad44f96bed400d82984d001 2024-11-11T21:46:13,860 DEBUG [M:0;22a1f13b90b0:42845 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cd267fc9e7cc4375a1297f55cea7f097 is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731361532045/Put/seqid=0 2024-11-11T21:46:13,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741897_1083 (size=6256) 2024-11-11T21:46:13,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46829 is added to blk_1073741897_1083 (size=6256) 2024-11-11T21:46:13,866 INFO [M:0;22a1f13b90b0:42845 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cd267fc9e7cc4375a1297f55cea7f097 2024-11-11T21:46:13,872 INFO [M:0;22a1f13b90b0:42845 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for cd267fc9e7cc4375a1297f55cea7f097 2024-11-11T21:46:13,895 DEBUG [M:0;22a1f13b90b0:42845 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/87f58dc8578442ad9799930b894871d5 is 69, key is 22a1f13b90b0,33181,1731361530149/rs:state/1731361530790/Put/seqid=0 2024-11-11T21:46:13,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741898_1084 (size=5224) 2024-11-11T21:46:13,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46829 is added to blk_1073741898_1084 (size=5224) 2024-11-11T21:46:13,900 INFO [M:0;22a1f13b90b0:42845 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/87f58dc8578442ad9799930b894871d5 2024-11-11T21:46:13,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33181-0x10030bfbd4a0001, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:46:13,904 INFO [RS:0;22a1f13b90b0:33181 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T21:46:13,904 INFO [RS:0;22a1f13b90b0:33181 {}] regionserver.HRegionServer(1031): Exiting; stopping=22a1f13b90b0,33181,1731361530149; zookeeper connection closed. 2024-11-11T21:46:13,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33181-0x10030bfbd4a0001, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:46:13,904 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1c7e60bb {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1c7e60bb 2024-11-11T21:46:13,905 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-11T21:46:13,919 DEBUG [M:0;22a1f13b90b0:42845 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bf718fbb77104c49856bc750040e6479 is 52, key is load_balancer_on/state:d/1731361531504/Put/seqid=0 2024-11-11T21:46:13,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46829 is added to blk_1073741899_1085 (size=5056) 2024-11-11T21:46:13,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741899_1085 (size=5056) 2024-11-11T21:46:13,925 INFO [M:0;22a1f13b90b0:42845 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bf718fbb77104c49856bc750040e6479 2024-11-11T21:46:13,930 DEBUG [M:0;22a1f13b90b0:42845 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dc554f32dad44f96bed400d82984d001 as 
hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/dc554f32dad44f96bed400d82984d001 2024-11-11T21:46:13,935 INFO [M:0;22a1f13b90b0:42845 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/dc554f32dad44f96bed400d82984d001, entries=8, sequenceid=60, filesize=5.5 K 2024-11-11T21:46:13,937 DEBUG [M:0;22a1f13b90b0:42845 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cd267fc9e7cc4375a1297f55cea7f097 as hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cd267fc9e7cc4375a1297f55cea7f097 2024-11-11T21:46:13,942 INFO [M:0;22a1f13b90b0:42845 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for cd267fc9e7cc4375a1297f55cea7f097 2024-11-11T21:46:13,942 INFO [M:0;22a1f13b90b0:42845 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cd267fc9e7cc4375a1297f55cea7f097, entries=6, sequenceid=60, filesize=6.1 K 2024-11-11T21:46:13,943 DEBUG [M:0;22a1f13b90b0:42845 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/87f58dc8578442ad9799930b894871d5 as hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/87f58dc8578442ad9799930b894871d5 2024-11-11T21:46:13,948 INFO [M:0;22a1f13b90b0:42845 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/87f58dc8578442ad9799930b894871d5, entries=2, sequenceid=60, filesize=5.1 K 2024-11-11T21:46:13,949 DEBUG [M:0;22a1f13b90b0:42845 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bf718fbb77104c49856bc750040e6479 as hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/bf718fbb77104c49856bc750040e6479 2024-11-11T21:46:13,954 INFO [M:0;22a1f13b90b0:42845 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/bf718fbb77104c49856bc750040e6479, entries=1, sequenceid=60, filesize=4.9 K 2024-11-11T21:46:13,955 INFO [M:0;22a1f13b90b0:42845 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=60, compaction requested=false 2024-11-11T21:46:13,957 INFO [M:0;22a1f13b90b0:42845 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
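The flush entries for the master store above (and for hbase:meta earlier) all write each new store file under the region's .tmp directory and then log "Committing ... .tmp/<file> as .../<family>/<file>", i.e. the write-to-temp-then-rename commit idiom, so readers never observe a half-written store file. The sketch below shows the same idiom on a local filesystem with java.nio; it is a generic illustration, not the HRegionFileSystem API.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class TmpThenCommitSketch {

    static Path flushAndCommit(Path regionDir, String family, String fileName, byte[] contents)
            throws IOException {
        Path tmpDir = regionDir.resolve(".tmp");
        Path familyDir = regionDir.resolve(family);
        Files.createDirectories(tmpDir);
        Files.createDirectories(familyDir);

        // 1) Flush: write the complete file under .tmp first.
        Path tmpFile = tmpDir.resolve(fileName);
        Files.write(tmpFile, contents);

        // 2) Commit: move it into the family directory in one rename.
        Path committed = familyDir.resolve(fileName);
        Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
        return committed;
    }

    public static void main(String[] args) throws IOException {
        Path regionDir = Files.createTempDirectory("region-demo");
        Path out = flushAndCommit(regionDir, "info", "example-storefile", "demo".getBytes());
        System.out.println("Committed " + out);
    }
}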
2024-11-11T21:46:13,957 DEBUG [M:0;22a1f13b90b0:42845 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731361573817Disabling compacts and flushes for region at 1731361573817Disabling writes for close at 1731361573817Obtaining lock to block concurrent updates at 1731361573817Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731361573817Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1731361573817Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731361573818 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731361573818Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731361573833 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731361573833Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731361573844 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731361573860 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731361573860Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731361573872 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731361573894 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731361573894Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731361573905 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731361573919 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731361573919Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b79c632: reopening flushed file at 1731361573929 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1d10dc32: reopening flushed file at 1731361573936 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c9ced74: reopening flushed file at 1731361573942 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4653fa86: reopening flushed file at 1731361573948 (+6 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=60, compaction requested=false at 1731361573955 (+7 ms)Writing region close event to WAL at 1731361573957 (+2 ms)Closed at 1731361573957 2024-11-11T21:46:13,957 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:13,957 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:13,957 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:13,958 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:13,958 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:13,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46829 is added to blk_1073741882_1065 (size=1045) 2024-11-11T21:46:13,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33611 is added to blk_1073741882_1065 (size=1045) 2024-11-11T21:46:14,136 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T21:46:14,159 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:14,159 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:14,160 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:14,160 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:14,160 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:14,161 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:14,165 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:14,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:14,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-11T21:46:14,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-11T21:46:18,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:18,667 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@47783789 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-759734978-172.17.0.2-1731361529491:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:39385,null,null]) java.net.ConnectException: Call From 22a1f13b90b0/172.17.0.2 to localhost:42557 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-11T21:46:18,958 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-11T21:46:18,959 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T21:46:18,959 INFO [M:0;22a1f13b90b0:42845 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-11T21:46:18,959 INFO [M:0;22a1f13b90b0:42845 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42845 2024-11-11T21:46:18,960 INFO [M:0;22a1f13b90b0:42845 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T21:46:19,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:46:19,063 INFO [M:0;22a1f13b90b0:42845 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T21:46:19,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42845-0x10030bfbd4a0000, quorum=127.0.0.1:64033, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:46:19,070 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1929003e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:46:19,071 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@16a2580d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:46:19,071 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:46:19,071 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2234d719{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:46:19,072 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@12f49c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/hadoop.log.dir/,STOPPED} 2024-11-11T21:46:19,073 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
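The wal.AbstractFSWAL(2118) error above names the config "hbase.wal.fshlog.wait.on.shutdown.seconds" as the knob for the 5-second wait on the async writer close. As a minimal sketch only (the property key is taken from the log message above; the value 30 is an assumed illustrative number, not a recommendation), the wait could be raised programmatically like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWaitExample {
        public static void main(String[] args) {
            // Load the usual HBase configuration (hbase-site.xml on the classpath).
            Configuration conf = HBaseConfiguration.create();
            // Give the async WAL writer more than the 5 seconds reported in the log;
            // 30 seconds is an assumed example value for illustration only.
            conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
            System.out.println(conf.get("hbase.wal.fshlog.wait.on.shutdown.seconds"));
        }
    }

The same key could instead be set in hbase-site.xml; what matters is that the configuration read by the failing test or server carries the larger value.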
2024-11-11T21:46:19,074 WARN [BP-759734978-172.17.0.2-1731361529491 heartbeating to localhost/127.0.0.1:39361 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T21:46:19,074 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T21:46:19,074 WARN [BP-759734978-172.17.0.2-1731361529491 heartbeating to localhost/127.0.0.1:39361 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-759734978-172.17.0.2-1731361529491 (Datanode Uuid 8595ef5f-b595-4e43-8673-b19b1a241670) service to localhost/127.0.0.1:39361 2024-11-11T21:46:19,073 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@47783789 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-759734978-172.17.0.2-1731361529491:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:39385,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:42557 , LocalHost:localPort 22a1f13b90b0/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-11T21:46:19,074 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@47783789 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-759734978-172.17.0.2-1731361529491:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:46829,null,null]) java.io.IOException: No block pool offer service for bpid=BP-759734978-172.17.0.2-1731361529491 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T21:46:19,074 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@47783789 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-759734978-172.17.0.2-1731361529491:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:39385,null,null], DatanodeInfoWithStorage[127.0.0.1:46829,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-759734978-172.17.0.2-1731361529491:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:39385,null,null], DatanodeInfoWithStorage[127.0.0.1:46829,null,null]] 2024-11-11T21:46:19,074 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data3/current/BP-759734978-172.17.0.2-1731361529491 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:46:19,075 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data4/current/BP-759734978-172.17.0.2-1731361529491 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:46:19,075 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T21:46:19,077 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2f0a2519{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:46:19,077 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@68004957{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:46:19,077 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:46:19,078 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@719bbb9b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:46:19,078 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@fb911ed{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/hadoop.log.dir/,STOPPED} 2024-11-11T21:46:19,079 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T21:46:19,079 WARN [BP-759734978-172.17.0.2-1731361529491 heartbeating to localhost/127.0.0.1:39361 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T21:46:19,079 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T21:46:19,079 WARN [BP-759734978-172.17.0.2-1731361529491 heartbeating to localhost/127.0.0.1:39361 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-759734978-172.17.0.2-1731361529491 (Datanode Uuid 748db9df-3e8d-4ea3-88d5-3839899f8e5b) service to localhost/127.0.0.1:39361 2024-11-11T21:46:19,079 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data9/current/BP-759734978-172.17.0.2-1731361529491 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:46:19,080 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/cluster_97c42dc2-8d2f-dc96-f2c8-3e8a9f23b902/data/data10/current/BP-759734978-172.17.0.2-1731361529491 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:46:19,080 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T21:46:19,086 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@477187fb{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T21:46:19,087 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6a488aac{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:46:19,087 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:46:19,087 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e7d5546{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:46:19,087 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@71ee126f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/hadoop.log.dir/,STOPPED} 2024-11-11T21:46:19,095 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T21:46:19,125 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T21:46:19,133 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=155 (was 77) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:39361 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: 
ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36327 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39361 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:39361 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:39361 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:39361 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:36327 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39361 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:39361 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$897/0x00007efcb8bf4db8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$897/0x00007efcb8bf4db8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39361 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$897/0x00007efcb8bf4db8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39361 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39361 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:39361 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=440 (was 404) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=110 (was 149), ProcessCount=11 (was 11), AvailableMemoryMB=4453 (was 5149) 2024-11-11T21:46:19,140 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=155, OpenFileDescriptor=440, MaxFileDescriptor=1048576, SystemLoadAverage=110, ProcessCount=11, AvailableMemoryMB=4453 2024-11-11T21:46:19,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T21:46:19,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/hadoop.log.dir so I do NOT create it in target/test-data/0600b998-e857-a43e-c167-8c11ea931226 2024-11-11T21:46:19,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6fff4665-a2ec-669e-b83d-5a6754da838d/hadoop.tmp.dir so I do NOT create it in target/test-data/0600b998-e857-a43e-c167-8c11ea931226 2024-11-11T21:46:19,140 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/cluster_60d9d42a-cfe0-df6b-27e0-bb097ba8c11a, deleteOnExit=true 2024-11-11T21:46:19,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T21:46:19,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/test.cache.data in system properties and HBase conf 2024-11-11T21:46:19,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T21:46:19,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/hadoop.log.dir in system properties and HBase conf 2024-11-11T21:46:19,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T21:46:19,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T21:46:19,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T21:46:19,141 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-11T21:46:19,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T21:46:19,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T21:46:19,141 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T21:46:19,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T21:46:19,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T21:46:19,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T21:46:19,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T21:46:19,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T21:46:19,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T21:46:19,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/nfs.dump.dir in system properties and HBase conf 2024-11-11T21:46:19,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/java.io.tmpdir in system properties and HBase conf 2024-11-11T21:46:19,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T21:46:19,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T21:46:19,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T21:46:19,154 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T21:46:19,202 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:46:19,206 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:46:19,208 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:46:19,208 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:46:19,208 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T21:46:19,209 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:46:19,209 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a58a89e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:46:19,209 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7bd03e52{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:46:19,304 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@10a92d53{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/java.io.tmpdir/jetty-localhost-33699-hadoop-hdfs-3_4_1-tests_jar-_-any-15922107394680439769/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T21:46:19,304 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ddfefdc{HTTP/1.1, (http/1.1)}{localhost:33699} 2024-11-11T21:46:19,304 INFO [Time-limited test {}] server.Server(415): Started @153704ms 2024-11-11T21:46:19,316 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T21:46:19,362 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:46:19,367 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:46:19,368 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:46:19,368 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:46:19,368 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T21:46:19,368 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e3b8424{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:46:19,369 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25f63c50{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:46:19,462 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@482b9b0f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/java.io.tmpdir/jetty-localhost-44879-hadoop-hdfs-3_4_1-tests_jar-_-any-9105470526895766168/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:46:19,462 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2b6caabd{HTTP/1.1, (http/1.1)}{localhost:44879} 2024-11-11T21:46:19,463 INFO [Time-limited test {}] server.Server(415): Started @153862ms 2024-11-11T21:46:19,464 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T21:46:19,495 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:46:19,500 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:46:19,500 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:46:19,500 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:46:19,500 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T21:46:19,501 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61f9169f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:46:19,502 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11255fea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:46:19,529 WARN [Thread-1188 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/cluster_60d9d42a-cfe0-df6b-27e0-bb097ba8c11a/data/data1/current/BP-2083987259-172.17.0.2-1731361579165/current, will proceed with Du for space computation calculation, 2024-11-11T21:46:19,529 WARN [Thread-1189 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/cluster_60d9d42a-cfe0-df6b-27e0-bb097ba8c11a/data/data2/current/BP-2083987259-172.17.0.2-1731361579165/current, will proceed with Du for space computation calculation, 2024-11-11T21:46:19,543 WARN [Thread-1167 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T21:46:19,546 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb4a97e4156549eb2 with lease ID 0x69ad2e16e342285a: Processing first storage report for DS-5ff179da-ea95-46a2-8236-cad9b6cb1965 from datanode DatanodeRegistration(127.0.0.1:37601, datanodeUuid=6a6f40a7-2edc-45d9-b369-02cae246d62a, infoPort=36943, infoSecurePort=0, ipcPort=45391, storageInfo=lv=-57;cid=testClusterID;nsid=1672974333;c=1731361579165) 2024-11-11T21:46:19,546 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4a97e4156549eb2 with lease ID 0x69ad2e16e342285a: from storage DS-5ff179da-ea95-46a2-8236-cad9b6cb1965 node DatanodeRegistration(127.0.0.1:37601, datanodeUuid=6a6f40a7-2edc-45d9-b369-02cae246d62a, infoPort=36943, infoSecurePort=0, ipcPort=45391, storageInfo=lv=-57;cid=testClusterID;nsid=1672974333;c=1731361579165), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:46:19,546 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb4a97e4156549eb2 with lease ID 0x69ad2e16e342285a: Processing first storage report for DS-bd3615d3-a419-4744-8bce-9147d0167f89 from datanode DatanodeRegistration(127.0.0.1:37601, datanodeUuid=6a6f40a7-2edc-45d9-b369-02cae246d62a, infoPort=36943, infoSecurePort=0, ipcPort=45391, storageInfo=lv=-57;cid=testClusterID;nsid=1672974333;c=1731361579165) 2024-11-11T21:46:19,546 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4a97e4156549eb2 with lease ID 0x69ad2e16e342285a: from storage DS-bd3615d3-a419-4744-8bce-9147d0167f89 node DatanodeRegistration(127.0.0.1:37601, datanodeUuid=6a6f40a7-2edc-45d9-b369-02cae246d62a, infoPort=36943, infoSecurePort=0, ipcPort=45391, storageInfo=lv=-57;cid=testClusterID;nsid=1672974333;c=1731361579165), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:46:19,599 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@37b7928{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/java.io.tmpdir/jetty-localhost-34667-hadoop-hdfs-3_4_1-tests_jar-_-any-18144325042951758796/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:46:19,600 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@671953b5{HTTP/1.1, (http/1.1)}{localhost:34667} 2024-11-11T21:46:19,600 INFO [Time-limited test {}] server.Server(415): Started @153999ms 2024-11-11T21:46:19,601 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T21:46:19,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:19,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:19,655 WARN [Thread-1215 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/cluster_60d9d42a-cfe0-df6b-27e0-bb097ba8c11a/data/data4/current/BP-2083987259-172.17.0.2-1731361579165/current, will proceed with Du for space computation calculation, 2024-11-11T21:46:19,655 WARN [Thread-1214 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/cluster_60d9d42a-cfe0-df6b-27e0-bb097ba8c11a/data/data3/current/BP-2083987259-172.17.0.2-1731361579165/current, will proceed with Du for space computation calculation, 2024-11-11T21:46:19,672 WARN [Thread-1203 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T21:46:19,674 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf62ba9ad0b0b0feb with lease ID 0x69ad2e16e342285b: Processing first storage report for DS-ecec349a-94e7-415a-8c65-f68821dff2b1 from datanode DatanodeRegistration(127.0.0.1:43031, datanodeUuid=0de43f7f-7633-4233-b6cd-9dcd438b25bc, infoPort=40755, infoSecurePort=0, ipcPort=44211, storageInfo=lv=-57;cid=testClusterID;nsid=1672974333;c=1731361579165) 2024-11-11T21:46:19,674 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf62ba9ad0b0b0feb with lease ID 0x69ad2e16e342285b: from storage DS-ecec349a-94e7-415a-8c65-f68821dff2b1 node DatanodeRegistration(127.0.0.1:43031, datanodeUuid=0de43f7f-7633-4233-b6cd-9dcd438b25bc, infoPort=40755, infoSecurePort=0, ipcPort=44211, storageInfo=lv=-57;cid=testClusterID;nsid=1672974333;c=1731361579165), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:46:19,675 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf62ba9ad0b0b0feb with lease ID 0x69ad2e16e342285b: Processing first storage report for DS-b5acc3a9-e542-4229-b6d1-b0871983bb12 from datanode DatanodeRegistration(127.0.0.1:43031, datanodeUuid=0de43f7f-7633-4233-b6cd-9dcd438b25bc, infoPort=40755, infoSecurePort=0, ipcPort=44211, storageInfo=lv=-57;cid=testClusterID;nsid=1672974333;c=1731361579165) 2024-11-11T21:46:19,675 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf62ba9ad0b0b0feb with lease ID 0x69ad2e16e342285b: from storage DS-b5acc3a9-e542-4229-b6d1-b0871983bb12 node DatanodeRegistration(127.0.0.1:43031, datanodeUuid=0de43f7f-7633-4233-b6cd-9dcd438b25bc, infoPort=40755, infoSecurePort=0, ipcPort=44211, storageInfo=lv=-57;cid=testClusterID;nsid=1672974333;c=1731361579165), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:46:19,724 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226 2024-11-11T21:46:19,728 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/cluster_60d9d42a-cfe0-df6b-27e0-bb097ba8c11a/zookeeper_0, clientPort=64425, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/cluster_60d9d42a-cfe0-df6b-27e0-bb097ba8c11a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/cluster_60d9d42a-cfe0-df6b-27e0-bb097ba8c11a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T21:46:19,729 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64425 2024-11-11T21:46:19,729 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:46:19,731 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:46:19,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43031 is added to blk_1073741825_1001 (size=7) 2024-11-11T21:46:19,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741825_1001 (size=7) 2024-11-11T21:46:19,742 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9 with version=8 2024-11-11T21:46:19,742 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/hbase-staging 2024-11-11T21:46:19,744 INFO [Time-limited test {}] client.ConnectionUtils(128): master/22a1f13b90b0:0 server-side Connection retries=45 2024-11-11T21:46:19,744 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:46:19,744 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T21:46:19,744 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T21:46:19,745 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:46:19,745 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T21:46:19,745 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T21:46:19,745 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T21:46:19,745 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45543 2024-11-11T21:46:19,747 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45543 connecting to ZooKeeper ensemble=127.0.0.1:64425 2024-11-11T21:46:19,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:455430x0, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T21:46:19,751 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45543-0x10030c07f3d0000 connected 2024-11-11T21:46:19,761 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:46:19,762 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:46:19,764 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:46:19,764 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9, hbase.cluster.distributed=false 2024-11-11T21:46:19,766 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T21:46:19,766 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45543 2024-11-11T21:46:19,767 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45543 2024-11-11T21:46:19,767 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45543 2024-11-11T21:46:19,767 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45543 2024-11-11T21:46:19,767 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45543 2024-11-11T21:46:19,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:46:19,783 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/22a1f13b90b0:0 server-side Connection retries=45 2024-11-11T21:46:19,783 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:46:19,783 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T21:46:19,783 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T21:46:19,783 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:46:19,783 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T21:46:19,783 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T21:46:19,783 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T21:46:19,784 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43249 2024-11-11T21:46:19,785 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43249 connecting to ZooKeeper ensemble=127.0.0.1:64425 2024-11-11T21:46:19,786 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:46:19,787 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:46:19,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:432490x0, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T21:46:19,790 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:432490x0, quorum=127.0.0.1:64425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:46:19,790 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43249-0x10030c07f3d0001 connected 2024-11-11T21:46:19,791 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T21:46:19,791 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T21:46:19,792 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43249-0x10030c07f3d0001, quorum=127.0.0.1:64425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T21:46:19,793 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43249-0x10030c07f3d0001, 
quorum=127.0.0.1:64425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T21:46:19,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43249 2024-11-11T21:46:19,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43249 2024-11-11T21:46:19,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43249 2024-11-11T21:46:19,794 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43249 2024-11-11T21:46:19,794 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43249 2024-11-11T21:46:19,804 DEBUG [M:0;22a1f13b90b0:45543 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;22a1f13b90b0:45543 2024-11-11T21:46:19,805 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/22a1f13b90b0,45543,1731361579744 2024-11-11T21:46:19,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:46:19,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43249-0x10030c07f3d0001, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:46:19,806 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/22a1f13b90b0,45543,1731361579744 2024-11-11T21:46:19,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43249-0x10030c07f3d0001, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T21:46:19,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:19,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43249-0x10030c07f3d0001, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:19,808 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T21:46:19,808 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/22a1f13b90b0,45543,1731361579744 from backup master directory 2024-11-11T21:46:19,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/22a1f13b90b0,45543,1731361579744 
2024-11-11T21:46:19,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43249-0x10030c07f3d0001, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:46:19,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:46:19,809 WARN [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T21:46:19,809 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=22a1f13b90b0,45543,1731361579744 2024-11-11T21:46:19,813 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/hbase.id] with ID: 4e89af10-665b-4bfd-8d2b-a378f28a654c 2024-11-11T21:46:19,813 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/.tmp/hbase.id 2024-11-11T21:46:19,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741826_1002 (size=42) 2024-11-11T21:46:19,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43031 is added to blk_1073741826_1002 (size=42) 2024-11-11T21:46:19,819 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/.tmp/hbase.id]:[hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/hbase.id] 2024-11-11T21:46:19,830 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:46:19,830 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T21:46:19,831 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
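The FSUtils entries above write the cluster ID to a temporary HDFS file and then move it to its final location, so readers never observe a half-written hbase.id. A hedged sketch of that write-then-rename pattern using the plain Hadoop FileSystem API; the paths below are simplified placeholders, not the test-data paths from the log:

```java
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/hbase/.tmp/hbase.id");   // temporary location (illustrative)
    Path target = new Path("/hbase/hbase.id");     // final location (illustrative)

    // Write the ID to the temporary file first.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("4e89af10-665b-4bfd-8d2b-a378f28a654c".getBytes(StandardCharsets.UTF_8));
    }

    // Then move it into place in a single namenode operation.
    if (!fs.rename(tmp, target)) {
      throw new IllegalStateException("rename failed: " + tmp + " -> " + target);
    }
  }
}
```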
2024-11-11T21:46:19,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:19,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43249-0x10030c07f3d0001, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:19,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43031 is added to blk_1073741827_1003 (size=196) 2024-11-11T21:46:19,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741827_1003 (size=196) 2024-11-11T21:46:19,841 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T21:46:19,842 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T21:46:19,842 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:46:19,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741828_1004 (size=1189) 2024-11-11T21:46:19,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43031 is added to blk_1073741828_1004 (size=1189) 2024-11-11T21:46:19,852 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store 2024-11-11T21:46:19,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741829_1005 (size=34) 2024-11-11T21:46:19,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43031 is added to blk_1073741829_1005 (size=34) 2024-11-11T21:46:19,859 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:46:19,859 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T21:46:19,859 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:46:19,859 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:46:19,859 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T21:46:19,860 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:46:19,860 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
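The master:store descriptor printed above spells out per-family settings (VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, BLOCKSIZE, IN_MEMORY). A sketch of expressing the same 'info' family with the public descriptor builders, assuming the standard client API; the table name below is illustrative and not the internal master:store table:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static void main(String[] args) {
    // Mirrors the 'info' family from the log: 3 versions, ROWCOL bloom filter,
    // ROW_INDEX_V1 block encoding, 8 KB blocks, kept in memory.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .setInMemory(true)
        .build();

    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example")) // illustrative name only
        .setColumnFamily(info)
        .build();

    System.out.println(table);
  }
}
```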
2024-11-11T21:46:19,860 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731361579859Disabling compacts and flushes for region at 1731361579859Disabling writes for close at 1731361579860 (+1 ms)Writing region close event to WAL at 1731361579860Closed at 1731361579860 2024-11-11T21:46:19,861 WARN [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/.initializing 2024-11-11T21:46:19,861 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/WALs/22a1f13b90b0,45543,1731361579744 2024-11-11T21:46:19,863 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C45543%2C1731361579744, suffix=, logDir=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/WALs/22a1f13b90b0,45543,1731361579744, archiveDir=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/oldWALs, maxLogs=10 2024-11-11T21:46:19,864 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C45543%2C1731361579744.1731361579863 2024-11-11T21:46:19,868 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/WALs/22a1f13b90b0,45543,1731361579744/22a1f13b90b0%2C45543%2C1731361579744.1731361579863 2024-11-11T21:46:19,869 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36943:36943),(127.0.0.1/127.0.0.1:40755:40755)] 2024-11-11T21:46:19,869 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T21:46:19,870 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:46:19,870 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:19,870 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:19,871 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:19,872 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T21:46:19,872 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:19,873 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:46:19,873 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:19,874 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T21:46:19,874 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:19,874 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:46:19,875 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:19,876 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T21:46:19,876 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:19,876 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:46:19,876 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:19,877 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T21:46:19,877 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:19,878 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:46:19,878 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:19,879 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:19,879 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:19,880 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:19,880 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:19,881 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T21:46:19,882 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:19,884 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T21:46:19,885 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=702012, jitterRate=-0.10734590888023376}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T21:46:19,885 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731361579870Initializing all the Stores at 1731361579871 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361579871Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361579871Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361579871Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361579871Cleaning up temporary data from old regions at 1731361579880 (+9 ms)Region opened successfully at 1731361579885 (+5 ms) 2024-11-11T21:46:19,886 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T21:46:19,889 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e984b5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=22a1f13b90b0/172.17.0.2:0 2024-11-11T21:46:19,890 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T21:46:19,890 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T21:46:19,890 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T21:46:19,890 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T21:46:19,891 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T21:46:19,891 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T21:46:19,891 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T21:46:19,893 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T21:46:19,894 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T21:46:19,894 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T21:46:19,895 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T21:46:19,895 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T21:46:19,896 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T21:46:19,896 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T21:46:19,897 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T21:46:19,897 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T21:46:19,899 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T21:46:19,899 DEBUG 
[master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T21:46:19,901 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T21:46:19,901 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T21:46:19,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T21:46:19,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43249-0x10030c07f3d0001, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T21:46:19,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:19,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43249-0x10030c07f3d0001, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:19,903 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=22a1f13b90b0,45543,1731361579744, sessionid=0x10030c07f3d0000, setting cluster-up flag (Was=false) 2024-11-11T21:46:19,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:19,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43249-0x10030c07f3d0001, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:19,907 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T21:46:19,908 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=22a1f13b90b0,45543,1731361579744 2024-11-11T21:46:19,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:19,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43249-0x10030c07f3d0001, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:19,913 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T21:46:19,914 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=22a1f13b90b0,45543,1731361579744 2024-11-11T21:46:19,915 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T21:46:19,916 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T21:46:19,917 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T21:46:19,917 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T21:46:19,917 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 22a1f13b90b0,45543,1731361579744 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T21:46:19,919 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:46:19,919 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:46:19,919 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:46:19,919 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:46:19,919 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/22a1f13b90b0:0, corePoolSize=10, maxPoolSize=10 2024-11-11T21:46:19,919 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:19,919 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=2, maxPoolSize=2 2024-11-11T21:46:19,919 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T21:46:19,920 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731361609920 2024-11-11T21:46:19,920 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T21:46:19,920 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T21:46:19,920 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T21:46:19,920 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T21:46:19,920 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T21:46:19,920 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T21:46:19,920 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:19,921 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T21:46:19,921 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T21:46:19,921 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:46:19,921 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T21:46:19,921 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T21:46:19,921 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T21:46:19,921 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T21:46:19,921 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361579921,5,FailOnTimeoutGroup] 2024-11-11T21:46:19,922 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:19,922 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T21:46:19,925 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361579921,5,FailOnTimeoutGroup] 2024-11-11T21:46:19,925 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:19,925 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T21:46:19,925 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:19,925 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
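Several entries above register ScheduledChore instances (LogsCleaner, HFileCleaner, ReplicationBarrierCleaner, SnapshotCleaner) with a ChoreService at fixed periods. A small sketch of that chore pattern, assuming the ScheduledChore/ChoreService constructors behave as shown here; the chore body is invented and only prints a message:

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal Stoppable so the chore can be cancelled cooperatively.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    ChoreService service = new ChoreService("demo");

    // Runs every 600000 ms, like the LogsCleaner chore in the log.
    ScheduledChore cleaner = new ScheduledChore("DemoCleaner", stopper, 600000) {
      @Override protected void chore() {
        // Real cleaners delete expired WALs/HFiles; this sketch only logs.
        System.out.println("cleaner tick");
      }
    };

    service.scheduleChore(cleaner);
    Thread.sleep(1000);
    service.shutdown();
  }
}
```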
2024-11-11T21:46:19,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741831_1007 (size=1321) 2024-11-11T21:46:19,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43031 is added to blk_1073741831_1007 (size=1321) 2024-11-11T21:46:19,933 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T21:46:19,933 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9 2024-11-11T21:46:19,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43031 is added to blk_1073741832_1008 (size=32) 2024-11-11T21:46:19,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741832_1008 (size=32) 2024-11-11T21:46:19,940 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:46:19,941 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T21:46:19,942 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T21:46:19,942 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:19,943 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:46:19,943 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T21:46:19,944 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T21:46:19,944 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:19,944 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:46:19,944 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T21:46:19,945 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T21:46:19,945 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:19,946 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:46:19,946 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T21:46:19,947 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T21:46:19,947 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:19,947 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:46:19,947 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T21:46:19,948 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740 2024-11-11T21:46:19,948 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740 2024-11-11T21:46:19,950 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T21:46:19,950 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T21:46:19,950 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
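The CompactionConfiguration entries above print the effective thresholds (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0), and FlushLargeStoresPolicy falls back to memstore-flush-size divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is unset. A hedged sketch of the configuration keys that usually map to those values; the numbers echo the log, while treating the key-to-value mapping as an assumption for this test build:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Minimum/maximum number of store files selected for one minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);

    // Selection ratios reported as "ratio 1.200000; off-peak ratio 5.000000".
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);

    // Per-column-family flush lower bound; when unset, HBase divides the region
    // memstore flush size by the number of families, as the log message notes.
    conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound",
        16L * 1024 * 1024);
  }
}
```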
2024-11-11T21:46:19,951 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T21:46:19,953 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T21:46:19,954 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=797345, jitterRate=0.013876721262931824}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T21:46:19,954 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731361579940Initializing all the Stores at 1731361579941 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361579941Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361579941Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361579941Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361579941Cleaning up temporary data from old regions at 1731361579950 (+9 ms)Region opened successfully at 1731361579954 (+4 ms) 2024-11-11T21:46:19,954 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T21:46:19,954 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T21:46:19,954 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T21:46:19,954 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T21:46:19,954 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T21:46:19,955 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T21:46:19,955 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731361579954Disabling compacts and flushes for region at 1731361579954Disabling writes for close at 1731361579954Writing region close 
event to WAL at 1731361579955 (+1 ms)Closed at 1731361579955 2024-11-11T21:46:19,956 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:46:19,956 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T21:46:19,956 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T21:46:19,957 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T21:46:19,958 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T21:46:19,997 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.HRegionServer(746): ClusterId : 4e89af10-665b-4bfd-8d2b-a378f28a654c 2024-11-11T21:46:19,998 DEBUG [RS:0;22a1f13b90b0:43249 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T21:46:20,002 DEBUG [RS:0;22a1f13b90b0:43249 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T21:46:20,002 DEBUG [RS:0;22a1f13b90b0:43249 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T21:46:20,006 DEBUG [RS:0;22a1f13b90b0:43249 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T21:46:20,007 DEBUG [RS:0;22a1f13b90b0:43249 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3777a673, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=22a1f13b90b0/172.17.0.2:0 2024-11-11T21:46:20,026 DEBUG [RS:0;22a1f13b90b0:43249 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;22a1f13b90b0:43249 2024-11-11T21:46:20,027 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T21:46:20,027 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T21:46:20,027 DEBUG [RS:0;22a1f13b90b0:43249 {}] regionserver.HRegionServer(832): About to register with Master. 
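At this point the active master has stored the InitMetaProcedure and the region server (RS:0) has picked up the ClusterId; clients reach such a cluster through the same ZooKeeper ensemble shown throughout the log (quorum=127.0.0.1:64425). A minimal, hedged sketch of a client connection against a cluster configured that way; the Admin call is illustrative only:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientConnectSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Ensemble and client port as reported in the log (quorum=127.0.0.1:64425).
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 64425);

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // List tables just to prove the connection works; hbase:meta always exists.
      admin.listTableNames();
      System.out.println("connected; meta table = " + TableName.META_TABLE_NAME);
    }
  }
}
```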
2024-11-11T21:46:20,027 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.HRegionServer(2659): reportForDuty to master=22a1f13b90b0,45543,1731361579744 with port=43249, startcode=1731361579783 2024-11-11T21:46:20,028 DEBUG [RS:0;22a1f13b90b0:43249 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T21:46:20,029 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51771, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T21:46:20,030 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45543 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 22a1f13b90b0,43249,1731361579783 2024-11-11T21:46:20,030 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45543 {}] master.ServerManager(517): Registering regionserver=22a1f13b90b0,43249,1731361579783 2024-11-11T21:46:20,032 DEBUG [RS:0;22a1f13b90b0:43249 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9 2024-11-11T21:46:20,032 DEBUG [RS:0;22a1f13b90b0:43249 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34237 2024-11-11T21:46:20,032 DEBUG [RS:0;22a1f13b90b0:43249 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T21:46:20,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T21:46:20,034 DEBUG [RS:0;22a1f13b90b0:43249 {}] zookeeper.ZKUtil(111): regionserver:43249-0x10030c07f3d0001, quorum=127.0.0.1:64425, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/22a1f13b90b0,43249,1731361579783 2024-11-11T21:46:20,034 WARN [RS:0;22a1f13b90b0:43249 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T21:46:20,034 INFO [RS:0;22a1f13b90b0:43249 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:46:20,034 DEBUG [RS:0;22a1f13b90b0:43249 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783 2024-11-11T21:46:20,034 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [22a1f13b90b0,43249,1731361579783] 2024-11-11T21:46:20,037 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T21:46:20,040 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T21:46:20,040 INFO [RS:0;22a1f13b90b0:43249 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T21:46:20,040 INFO [RS:0;22a1f13b90b0:43249 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
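The MemStoreFlusher line reports globalMemStoreLimit=880 M with a low-water mark of 836 M, and the PressureAwareCompactionThroughputController reports 50 to 100 MB/s bounds. A hedged sketch of the keys that typically correspond to those figures; the key names are assumed to apply to this build, and the fractions are chosen so that 836/880 is roughly 0.95:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreThroughputSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Fraction of the heap all memstores may use, and the low-water fraction of it.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);

    // Compaction throughput bounds matching "higher bound: 100.00 MB/second,
    // lower bound 50.00 MB/second" from the log.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
  }
}
```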
2024-11-11T21:46:20,040 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T21:46:20,041 INFO [RS:0;22a1f13b90b0:43249 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T21:46:20,041 INFO [RS:0;22a1f13b90b0:43249 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:20,041 DEBUG [RS:0;22a1f13b90b0:43249 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:20,041 DEBUG [RS:0;22a1f13b90b0:43249 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:20,042 DEBUG [RS:0;22a1f13b90b0:43249 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:20,042 DEBUG [RS:0;22a1f13b90b0:43249 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:20,042 DEBUG [RS:0;22a1f13b90b0:43249 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:20,042 DEBUG [RS:0;22a1f13b90b0:43249 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/22a1f13b90b0:0, corePoolSize=2, maxPoolSize=2 2024-11-11T21:46:20,042 DEBUG [RS:0;22a1f13b90b0:43249 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:20,042 DEBUG [RS:0;22a1f13b90b0:43249 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:20,042 DEBUG [RS:0;22a1f13b90b0:43249 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:20,042 DEBUG [RS:0;22a1f13b90b0:43249 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:20,042 DEBUG [RS:0;22a1f13b90b0:43249 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:20,042 DEBUG [RS:0;22a1f13b90b0:43249 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:20,042 DEBUG [RS:0;22a1f13b90b0:43249 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/22a1f13b90b0:0, corePoolSize=3, maxPoolSize=3 2024-11-11T21:46:20,042 DEBUG [RS:0;22a1f13b90b0:43249 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0, corePoolSize=3, maxPoolSize=3 2024-11-11T21:46:20,043 INFO [RS:0;22a1f13b90b0:43249 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
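Editor's note: the many "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." entries come from the ChoreService/ScheduledChore pattern: a chore is a small periodic task owned by a Stoppable component, and scheduling it produces exactly this INFO line (hbase.ChoreService(168) above). A minimal, self-contained sketch of that pattern follows; the chore name, period, and printed message are illustrative only.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // One periodic task, named and scheduled the same way the chores above are.
        ScheduledChore tick = new ScheduledChore("demo-chore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(tick); // emits the "... is enabled." INFO line
        Thread.sleep(3000);
        stopper.stop("test finished");
        service.shutdown();
      }
    }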
2024-11-11T21:46:20,043 INFO [RS:0;22a1f13b90b0:43249 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:20,043 INFO [RS:0;22a1f13b90b0:43249 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:20,043 INFO [RS:0;22a1f13b90b0:43249 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:20,043 INFO [RS:0;22a1f13b90b0:43249 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:20,043 INFO [RS:0;22a1f13b90b0:43249 {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,43249,1731361579783-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T21:46:20,057 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T21:46:20,057 INFO [RS:0;22a1f13b90b0:43249 {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,43249,1731361579783-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:20,057 INFO [RS:0;22a1f13b90b0:43249 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:20,057 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.Replication(171): 22a1f13b90b0,43249,1731361579783 started 2024-11-11T21:46:20,070 INFO [RS:0;22a1f13b90b0:43249 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:20,070 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.HRegionServer(1482): Serving as 22a1f13b90b0,43249,1731361579783, RpcServer on 22a1f13b90b0/172.17.0.2:43249, sessionid=0x10030c07f3d0001 2024-11-11T21:46:20,070 DEBUG [RS:0;22a1f13b90b0:43249 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T21:46:20,070 DEBUG [RS:0;22a1f13b90b0:43249 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 22a1f13b90b0,43249,1731361579783 2024-11-11T21:46:20,070 DEBUG [RS:0;22a1f13b90b0:43249 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '22a1f13b90b0,43249,1731361579783' 2024-11-11T21:46:20,071 DEBUG [RS:0;22a1f13b90b0:43249 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T21:46:20,071 DEBUG [RS:0;22a1f13b90b0:43249 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T21:46:20,071 DEBUG [RS:0;22a1f13b90b0:43249 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T21:46:20,071 DEBUG [RS:0;22a1f13b90b0:43249 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T21:46:20,071 DEBUG [RS:0;22a1f13b90b0:43249 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 22a1f13b90b0,43249,1731361579783 2024-11-11T21:46:20,071 DEBUG [RS:0;22a1f13b90b0:43249 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '22a1f13b90b0,43249,1731361579783' 2024-11-11T21:46:20,071 DEBUG [RS:0;22a1f13b90b0:43249 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T21:46:20,072 DEBUG 
[RS:0;22a1f13b90b0:43249 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T21:46:20,072 DEBUG [RS:0;22a1f13b90b0:43249 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T21:46:20,072 INFO [RS:0;22a1f13b90b0:43249 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T21:46:20,072 INFO [RS:0;22a1f13b90b0:43249 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T21:46:20,109 WARN [22a1f13b90b0:45543 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-11T21:46:20,177 INFO [RS:0;22a1f13b90b0:43249 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C43249%2C1731361579783, suffix=, logDir=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783, archiveDir=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/oldWALs, maxLogs=32 2024-11-11T21:46:20,180 INFO [RS:0;22a1f13b90b0:43249 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C43249%2C1731361579783.1731361580179 2024-11-11T21:46:20,188 INFO [RS:0;22a1f13b90b0:43249 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361580179 2024-11-11T21:46:20,190 DEBUG [RS:0;22a1f13b90b0:43249 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36943:36943),(127.0.0.1/127.0.0.1:40755:40755)] 2024-11-11T21:46:20,359 DEBUG [22a1f13b90b0:45543 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T21:46:20,361 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=22a1f13b90b0,43249,1731361579783 2024-11-11T21:46:20,365 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 22a1f13b90b0,43249,1731361579783, state=OPENING 2024-11-11T21:46:20,368 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T21:46:20,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43249-0x10030c07f3d0001, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:20,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:20,371 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:46:20,371 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T21:46:20,371 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:46:20,372 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=22a1f13b90b0,43249,1731361579783}] 2024-11-11T21:46:20,526 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T21:46:20,532 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34299, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T21:46:20,538 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T21:46:20,538 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:46:20,541 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C43249%2C1731361579783.meta, suffix=.meta, logDir=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783, archiveDir=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/oldWALs, maxLogs=32 2024-11-11T21:46:20,542 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C43249%2C1731361579783.meta.1731361580542.meta 2024-11-11T21:46:20,549 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.meta.1731361580542.meta 2024-11-11T21:46:20,550 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40755:40755),(127.0.0.1/127.0.0.1:36943:36943)] 2024-11-11T21:46:20,553 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T21:46:20,553 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T21:46:20,553 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T21:46:20,553 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
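Editor's note: the "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" entries above describe when a WAL file is rolled: once it grows past the block size scaled by the roll multiplier, or once too many live WAL files accumulate. The sketch below sets the configuration keys I believe produce those numbers; the key names are assumptions and should be checked against the running HBase release.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollConfSketch {
      static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys behind "blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32":
        // the roll size is the WAL block size scaled by the roll multiplier.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        conf.setInt("hbase.regionserver.maxlogs", 32);
        return conf;
      }
    }

A test can also force a roll explicitly through Admin.rollWALWriter(ServerName) instead of waiting for the size threshold to be crossed.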
2024-11-11T21:46:20,553 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T21:46:20,554 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:46:20,554 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T21:46:20,554 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T21:46:20,556 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T21:46:20,557 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T21:46:20,557 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:20,557 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:46:20,557 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T21:46:20,558 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T21:46:20,558 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:20,559 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:46:20,559 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T21:46:20,559 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T21:46:20,559 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:20,560 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:46:20,560 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T21:46:20,560 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T21:46:20,560 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:20,561 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
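Editor's note: the StoreOpener entries above enumerate the four column families of hbase:meta (info, ns, rep_barrier, table), each opened with ROW_INDEX_V1 encoding and a DefaultMemStore. The same settings can be confirmed from a client by reading the meta table descriptor; a minimal sketch follows, assuming an hbase-site.xml on the classpath that points at this cluster.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class MetaFamiliesSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
          // Should list info, ns, rep_barrier and table, matching the StoreOpener lines above.
          for (ColumnFamilyDescriptor cf : meta.getColumnFamilies()) {
            System.out.printf("%s encoding=%s blocksize=%d inMemory=%s%n",
                cf.getNameAsString(), cf.getDataBlockEncoding(), cf.getBlocksize(), cf.isInMemory());
          }
        }
      }
    }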
2024-11-11T21:46:20,561 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T21:46:20,562 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740 2024-11-11T21:46:20,563 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740 2024-11-11T21:46:20,564 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T21:46:20,564 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T21:46:20,565 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T21:46:20,566 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T21:46:20,567 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=759713, jitterRate=-0.03397610783576965}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T21:46:20,567 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T21:46:20,567 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731361580554Writing region info on filesystem at 1731361580554Initializing all the Stores at 1731361580555 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361580555Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361580556 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361580556Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361580556Cleaning up temporary data from old regions at 1731361580564 (+8 ms)Running coprocessor post-open hooks at 1731361580567 (+3 ms)Region opened successfully at 1731361580567 2024-11-11T21:46:20,568 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731361580526 2024-11-11T21:46:20,570 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T21:46:20,570 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T21:46:20,571 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=22a1f13b90b0,43249,1731361579783 2024-11-11T21:46:20,572 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 22a1f13b90b0,43249,1731361579783, state=OPEN 2024-11-11T21:46:20,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T21:46:20,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43249-0x10030c07f3d0001, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T21:46:20,574 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=22a1f13b90b0,43249,1731361579783 2024-11-11T21:46:20,575 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:46:20,575 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:46:20,577 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T21:46:20,577 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=22a1f13b90b0,43249,1731361579783 in 204 msec 2024-11-11T21:46:20,580 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T21:46:20,580 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 622 msec 2024-11-11T21:46:20,581 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:46:20,581 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T21:46:20,582 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T21:46:20,582 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=22a1f13b90b0,43249,1731361579783, seqNum=-1] 2024-11-11T21:46:20,583 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T21:46:20,584 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48343, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T21:46:20,589 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 672 msec 2024-11-11T21:46:20,589 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731361580589, completionTime=-1 2024-11-11T21:46:20,589 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T21:46:20,589 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-11T21:46:20,591 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T21:46:20,591 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731361640591 2024-11-11T21:46:20,591 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731361700591 2024-11-11T21:46:20,591 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-11T21:46:20,592 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45543,1731361579744-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:20,592 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45543,1731361579744-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:20,592 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45543,1731361579744-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:20,592 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-22a1f13b90b0:45543, period=300000, unit=MILLISECONDS is enabled. 
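Editor's note: the "Start fetching meta region location from registry" / "The fetched meta region location is ..." entries above are the lookup every client performs before it can read or write hbase:meta. The sketch below does the equivalent lookup through the public client API; it assumes a reachable cluster configured via hbase-site.xml and is not the internal code path the PEWorker uses.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Client-side equivalent of the "fetched meta region location" lines above.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""));
          System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
        }
      }
    }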
2024-11-11T21:46:20,592 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:20,592 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:20,593 DEBUG [master/22a1f13b90b0:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T21:46:20,595 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.786sec 2024-11-11T21:46:20,596 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T21:46:20,596 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T21:46:20,596 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T21:46:20,596 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T21:46:20,596 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T21:46:20,596 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45543,1731361579744-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T21:46:20,596 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45543,1731361579744-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T21:46:20,599 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T21:46:20,599 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T21:46:20,599 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45543,1731361579744-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:20,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:20,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:20,697 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a20a16b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:46:20,697 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 22a1f13b90b0,45543,-1 for getting cluster id 2024-11-11T21:46:20,697 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T21:46:20,699 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4e89af10-665b-4bfd-8d2b-a378f28a654c' 2024-11-11T21:46:20,699 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T21:46:20,699 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4e89af10-665b-4bfd-8d2b-a378f28a654c" 2024-11-11T21:46:20,699 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24123ae9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:46:20,700 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [22a1f13b90b0,45543,-1] 2024-11-11T21:46:20,700 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T21:46:20,700 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:46:20,702 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41286, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T21:46:20,702 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37b5496d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:46:20,703 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T21:46:20,704 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=22a1f13b90b0,43249,1731361579783, seqNum=-1] 2024-11-11T21:46:20,704 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T21:46:20,706 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54864, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T21:46:20,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=22a1f13b90b0,45543,1731361579744 2024-11-11T21:46:20,708 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:46:20,710 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T21:46:20,710 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-11T21:46:20,711 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-11T21:46:20,711 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T21:46:20,712 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 22a1f13b90b0,45543,1731361579744 2024-11-11T21:46:20,712 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5f266793 2024-11-11T21:46:20,712 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T21:46:20,714 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41298, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T21:46:20,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45543 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-11T21:46:20,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45543 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
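Editor's note: the two TableDescriptorChecker warnings at the end of the block above are expected for this test. testLogRollOnPipelineRestart creates its table with a deliberately tiny MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) so that flushes and WAL activity happen quickly. The log does not show how the test sets these values, so the following is only a sketch of one way to build such a descriptor with the public client API.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class TinyTableSketch {
      // Builds a descriptor matching the values in the warnings above; production
      // tables would use far larger MAX_FILESIZE / MEMSTORE_FLUSHSIZE settings.
      static void createTestTable(Admin admin) throws IOException {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
            .setMaxFileSize(786432L)
            .setMemStoreFlushSize(8192L)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .build();
        admin.createTable(td);
      }
    }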
2024-11-11T21:46:20,715 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45543 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T21:46:20,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45543 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-11T21:46:20,717 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T21:46:20,718 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:20,718 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45543 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-11T21:46:20,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45543 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T21:46:20,719 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T21:46:20,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43031 is added to blk_1073741835_1011 (size=395) 2024-11-11T21:46:20,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741835_1011 (size=395) 2024-11-11T21:46:20,727 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => bf1e6c0d21761d49775a874935def3d2, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9 2024-11-11T21:46:20,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43031 is added to blk_1073741836_1012 (size=78) 2024-11-11T21:46:20,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37601 is added to blk_1073741836_1012 (size=78) 2024-11-11T21:46:20,734 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:46:20,734 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing bf1e6c0d21761d49775a874935def3d2, disabling compactions & flushes 2024-11-11T21:46:20,734 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2. 2024-11-11T21:46:20,734 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2. 2024-11-11T21:46:20,734 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2. after waiting 0 ms 2024-11-11T21:46:20,734 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2. 2024-11-11T21:46:20,734 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2. 2024-11-11T21:46:20,734 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for bf1e6c0d21761d49775a874935def3d2: Waiting for close lock at 1731361580734Disabling compacts and flushes for region at 1731361580734Disabling writes for close at 1731361580734Writing region close event to WAL at 1731361580734Closed at 1731361580734 2024-11-11T21:46:20,736 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T21:46:20,736 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731361580736"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731361580736"}]},"ts":"1731361580736"} 2024-11-11T21:46:20,739 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
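Editor's note: after CREATE_TABLE_ADD_TO_META ("Added 1 regions to meta.") the procedure still has to assign and open the region, so a caller normally waits for the table to become available before writing to it. A minimal polling sketch using the public Admin API is below; the mini-cluster tests themselves would typically rely on the testing utility's own wait helpers rather than hand-rolled polling.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class WaitForTableSketch {
      // Polls until the assignment kicked off by the create procedure has opened all regions.
      static void waitUntilAvailable(Admin admin, TableName table, long timeoutMs)
          throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!admin.isTableAvailable(table)) {
          if (System.currentTimeMillis() > deadline) {
            throw new IOException("Timed out waiting for " + table);
          }
          Thread.sleep(100);
        }
      }
    }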
2024-11-11T21:46:20,740 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T21:46:20,740 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731361580740"}]},"ts":"1731361580740"} 2024-11-11T21:46:20,743 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-11T21:46:20,743 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=bf1e6c0d21761d49775a874935def3d2, ASSIGN}] 2024-11-11T21:46:20,745 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=bf1e6c0d21761d49775a874935def3d2, ASSIGN 2024-11-11T21:46:20,746 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=bf1e6c0d21761d49775a874935def3d2, ASSIGN; state=OFFLINE, location=22a1f13b90b0,43249,1731361579783; forceNewPlan=false, retain=false 2024-11-11T21:46:20,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:20,896 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bf1e6c0d21761d49775a874935def3d2, regionState=OPENING, regionLocation=22a1f13b90b0,43249,1731361579783 2024-11-11T21:46:20,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=bf1e6c0d21761d49775a874935def3d2, ASSIGN because future has completed 2024-11-11T21:46:20,900 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bf1e6c0d21761d49775a874935def3d2, server=22a1f13b90b0,43249,1731361579783}] 2024-11-11T21:46:21,063 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2. 2024-11-11T21:46:21,064 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => bf1e6c0d21761d49775a874935def3d2, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2.', STARTKEY => '', ENDKEY => ''} 2024-11-11T21:46:21,064 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart bf1e6c0d21761d49775a874935def3d2 2024-11-11T21:46:21,064 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:46:21,065 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for bf1e6c0d21761d49775a874935def3d2 2024-11-11T21:46:21,065 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for bf1e6c0d21761d49775a874935def3d2 2024-11-11T21:46:21,067 INFO [StoreOpener-bf1e6c0d21761d49775a874935def3d2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region bf1e6c0d21761d49775a874935def3d2 2024-11-11T21:46:21,069 INFO [StoreOpener-bf1e6c0d21761d49775a874935def3d2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 
EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bf1e6c0d21761d49775a874935def3d2 columnFamilyName info 2024-11-11T21:46:21,069 DEBUG [StoreOpener-bf1e6c0d21761d49775a874935def3d2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:21,070 INFO [StoreOpener-bf1e6c0d21761d49775a874935def3d2-1 {}] regionserver.HStore(327): Store=bf1e6c0d21761d49775a874935def3d2/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:46:21,070 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for bf1e6c0d21761d49775a874935def3d2 2024-11-11T21:46:21,071 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/default/TestLogRolling-testLogRollOnPipelineRestart/bf1e6c0d21761d49775a874935def3d2 2024-11-11T21:46:21,071 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/default/TestLogRolling-testLogRollOnPipelineRestart/bf1e6c0d21761d49775a874935def3d2 2024-11-11T21:46:21,072 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for bf1e6c0d21761d49775a874935def3d2 2024-11-11T21:46:21,072 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for bf1e6c0d21761d49775a874935def3d2 2024-11-11T21:46:21,074 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for bf1e6c0d21761d49775a874935def3d2 2024-11-11T21:46:21,078 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/default/TestLogRolling-testLogRollOnPipelineRestart/bf1e6c0d21761d49775a874935def3d2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T21:46:21,079 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened bf1e6c0d21761d49775a874935def3d2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=792059, jitterRate=0.007155567407608032}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T21:46:21,079 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegion(1122): Running coprocessor post-open hooks for bf1e6c0d21761d49775a874935def3d2 2024-11-11T21:46:21,080 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for bf1e6c0d21761d49775a874935def3d2: Running coprocessor pre-open hook at 1731361581065Writing region info on filesystem at 1731361581065Initializing all the Stores at 1731361581066 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361581066Cleaning up temporary data from old regions at 1731361581072 (+6 ms)Running coprocessor post-open hooks at 1731361581079 (+7 ms)Region opened successfully at 1731361581080 (+1 ms) 2024-11-11T21:46:21,081 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2., pid=6, masterSystemTime=1731361581054 2024-11-11T21:46:21,084 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2. 2024-11-11T21:46:21,085 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2. 
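The sequence above shows the master-side CreateTableProcedure (pid=4) for TestLogRolling-testLogRollOnPipelineRestart: the region is assigned via a TransitRegionStateProcedure (pid=5) and an OpenRegionProcedure (pid=6), and the region server opens the single region bf1e6c0d21761d49775a874935def3d2 with one 'info' column family whose attributes are printed in the region-open journal (VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => '65536', block cache enabled). For reference, a minimal client-side sketch of the kind of createTable call that produces such a procedure is given below; it assumes a cluster reachable through the default configuration and mirrors only the family attributes visible in the journal, while the class name and connection setup are illustrative rather than the test's actual code.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateTestLogRollingTable {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Column family matching the descriptor logged in the region-open journal:
      // one version, ROW bloom filter, 64 KB blocks, block cache enabled.
      ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("info"))
          .setMaxVersions(1)
          .setBloomFilterType(BloomType.ROW)
          .setBlocksize(64 * 1024)
          .setBlockCacheEnabled(true)
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
          .setColumnFamily(info)
          .build();
      // On the master this call is executed as a CreateTableProcedure
      // (CREATE_TABLE_ASSIGN_REGIONS, CREATE_TABLE_UPDATE_DESC_CACHE,
      // CREATE_TABLE_POST_OPERATION), as in the entries above and below.
      admin.createTable(table);
    }
  }
}

Admin.createTable is synchronous and returns once the procedure completes; in the run above the whole procedure finishes in 385 msec, as reported by the "Finished pid=4" entry that follows.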
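Separately, the recurring RecoverLeaseFSUtils(258) "Failed invocation ... Filesystem closed" WARNs that started at 21:46:20,768 and continue below all refer to WAL files of an earlier mini-cluster instance (note the different NameNode port, 39361 versus 34237, and the older server start codes). The stack traces show the Close-WAL-Writer thread probing DistributedFileSystem#isFileClosed through reflection while waiting for the old WAL leases to be recovered; because that DFSClient has already been closed, every probe fails with IOException: Filesystem closed, surfaced as an InvocationTargetException, and the timestamps show the probe being retried roughly once per second. The sketch below illustrates only this reflective probe-and-retry pattern; the class and helper names in it are hypothetical, and it is not HBase's actual implementation.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical illustration of the probe seen in the stack traces in this log:
// DistributedFileSystem#isFileClosed is looked up reflectively so the caller does
// not need a compile-time dependency on that HDFS-specific method.
public final class IsFileClosedProbe {

  static boolean probeIsFileClosed(FileSystem fs, Path path) {
    try {
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (InvocationTargetException e) {
      // The reflected call itself threw; with a closed DFSClient the cause is
      // java.io.IOException("Filesystem closed"), as in the WARNs in this log.
      System.err.println("isFileClosed probe failed: " + e.getCause());
      return false;
    } catch (ReflectiveOperationException e) {
      // isFileClosed is not available on this FileSystem implementation.
      return false;
    }
  }

  public static void main(String[] args) throws IOException, InterruptedException {
    Path wal = new Path(args[0]); // e.g. an hdfs:// path to an old WAL file
    FileSystem fs = wal.getFileSystem(new Configuration());
    // Retry for a bounded time, mirroring the roughly one-second cadence in the log.
    for (int attempt = 0; attempt < 60 && !probeIsFileClosed(fs, wal); attempt++) {
      Thread.sleep(1000L);
    }
  }
}

In this run the probes can never succeed, because the filesystem handle behind them is already closed, which is why the same three WAL paths keep reappearing at roughly one-second intervals throughout the rest of this section.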
2024-11-11T21:46:21,086 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bf1e6c0d21761d49775a874935def3d2, regionState=OPEN, openSeqNum=2, regionLocation=22a1f13b90b0,43249,1731361579783 2024-11-11T21:46:21,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bf1e6c0d21761d49775a874935def3d2, server=22a1f13b90b0,43249,1731361579783 because future has completed 2024-11-11T21:46:21,093 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T21:46:21,093 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure bf1e6c0d21761d49775a874935def3d2, server=22a1f13b90b0,43249,1731361579783 in 191 msec 2024-11-11T21:46:21,095 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T21:46:21,095 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=bf1e6c0d21761d49775a874935def3d2, ASSIGN in 350 msec 2024-11-11T21:46:21,096 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T21:46:21,097 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731361581097"}]},"ts":"1731361581097"} 2024-11-11T21:46:21,099 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-11T21:46:21,100 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T21:46:21,102 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 385 msec 2024-11-11T21:46:21,463 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T21:46:21,464 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-11T21:46:21,466 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-11T21:46:21,466 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-11T21:46:21,468 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T21:46:21,468 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-11T21:46:21,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:21,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:21,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:22,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:22,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:22,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:23,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:23,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:23,772 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:24,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:24,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:24,772 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:25,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:25,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:25,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:25,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:25,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:25,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:25,578 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:25,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:25,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:25,580 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:25,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:25,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:25,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:26,086 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T21:46:26,111 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:26,111 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:26,111 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:26,112 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:26,112 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:26,112 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:26,115 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:26,116 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:26,116 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:26,118 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:26,122 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T21:46:26,123 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-11T21:46:26,613 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:26,613 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:26,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:27,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:27,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:27,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:28,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:28,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:28,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:29,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:29,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:29,778 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:30,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:30,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:30,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:30,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45543 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T21:46:30,794 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-11T21:46:30,794 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-11T21:46:30,803 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-11T21:46:30,803 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2. 2024-11-11T21:46:30,806 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2., hostname=22a1f13b90b0,43249,1731361579783, seqNum=2] 2024-11-11T21:46:31,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:31,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:31,781 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:32,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:32,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:32,782 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:46:32,809 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361580179 2024-11-11T21:46:32,810 WARN [ResponseProcessor for block BP-2083987259-172.17.0.2-1731361579165:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2083987259-172.17.0.2-1731361579165:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:32,810 WARN [ResponseProcessor for block BP-2083987259-172.17.0.2-1731361579165:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2083987259-172.17.0.2-1731361579165:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-2083987259-172.17.0.2-1731361579165:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:43031,DS-ecec349a-94e7-415a-8c65-f68821dff2b1,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:32,810 WARN [ResponseProcessor for block BP-2083987259-172.17.0.2-1731361579165:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2083987259-172.17.0.2-1731361579165:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-2083987259-172.17.0.2-1731361579165:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:43031,DS-ecec349a-94e7-415a-8c65-f68821dff2b1,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:32,810 WARN [DataStreamer for file /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.meta.1731361580542.meta block BP-2083987259-172.17.0.2-1731361579165:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2083987259-172.17.0.2-1731361579165:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43031,DS-ecec349a-94e7-415a-8c65-f68821dff2b1,DISK], DatanodeInfoWithStorage[127.0.0.1:37601,DS-5ff179da-ea95-46a2-8236-cad9b6cb1965,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43031,DS-ecec349a-94e7-415a-8c65-f68821dff2b1,DISK]) is bad. 2024-11-11T21:46:32,810 WARN [DataStreamer for file /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/WALs/22a1f13b90b0,45543,1731361579744/22a1f13b90b0%2C45543%2C1731361579744.1731361579863 block BP-2083987259-172.17.0.2-1731361579165:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2083987259-172.17.0.2-1731361579165:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37601,DS-5ff179da-ea95-46a2-8236-cad9b6cb1965,DISK], DatanodeInfoWithStorage[127.0.0.1:43031,DS-ecec349a-94e7-415a-8c65-f68821dff2b1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43031,DS-ecec349a-94e7-415a-8c65-f68821dff2b1,DISK]) is bad. 
2024-11-11T21:46:32,810 WARN [PacketResponder: BP-2083987259-172.17.0.2-1731361579165:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43031] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:32,810 WARN [DataStreamer for file /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361580179 block BP-2083987259-172.17.0.2-1731361579165:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2083987259-172.17.0.2-1731361579165:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37601,DS-5ff179da-ea95-46a2-8236-cad9b6cb1965,DISK], DatanodeInfoWithStorage[127.0.0.1:43031,DS-ecec349a-94e7-415a-8c65-f68821dff2b1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43031,DS-ecec349a-94e7-415a-8c65-f68821dff2b1,DISK]) is bad. 2024-11-11T21:46:32,810 WARN [PacketResponder: BP-2083987259-172.17.0.2-1731361579165:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43031] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:32,810 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_869443960_22 at /127.0.0.1:45458 [Receiving block BP-2083987259-172.17.0.2-1731361579165:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37601:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45458 dst: /127.0.0.1:37601 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:32,811 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_869443960_22 at /127.0.0.1:58954 [Receiving block BP-2083987259-172.17.0.2-1731361579165:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43031:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58954 dst: /127.0.0.1:43031 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:32,811 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1743487700_22 at /127.0.0.1:45478 [Receiving block BP-2083987259-172.17.0.2-1731361579165:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37601:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45478 dst: /127.0.0.1:37601 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:32,811 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1743487700_22 at /127.0.0.1:58986 [Receiving block BP-2083987259-172.17.0.2-1731361579165:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43031:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58986 dst: /127.0.0.1:43031 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:32,811 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1743487700_22 at /127.0.0.1:45480 [Receiving block BP-2083987259-172.17.0.2-1731361579165:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37601:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45480 dst: /127.0.0.1:37601 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:32,812 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1743487700_22 at /127.0.0.1:58972 [Receiving block BP-2083987259-172.17.0.2-1731361579165:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43031:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58972 dst: /127.0.0.1:43031 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T21:46:32,813 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@37b7928{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T21:46:32,814 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@671953b5{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T21:46:32,814 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T21:46:32,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11255fea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T21:46:32,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61f9169f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/hadoop.log.dir/,STOPPED}
2024-11-11T21:46:32,817 WARN [BP-2083987259-172.17.0.2-1731361579165 heartbeating to localhost/127.0.0.1:34237 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-11T21:46:32,817 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-11T21:46:32,817 WARN [BP-2083987259-172.17.0.2-1731361579165 heartbeating to localhost/127.0.0.1:34237 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2083987259-172.17.0.2-1731361579165 (Datanode Uuid 0de43f7f-7633-4233-b6cd-9dcd438b25bc) service to localhost/127.0.0.1:34237
2024-11-11T21:46:32,817 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-11T21:46:32,817 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/cluster_60d9d42a-cfe0-df6b-27e0-bb097ba8c11a/data/data3/current/BP-2083987259-172.17.0.2-1731361579165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T21:46:32,818 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/cluster_60d9d42a-cfe0-df6b-27e0-bb097ba8c11a/data/data4/current/BP-2083987259-172.17.0.2-1731361579165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T21:46:32,818 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-11T21:46:32,829 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:46:32,834 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:46:32,835 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:46:32,835 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:46:32,835 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T21:46:32,836 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@bd6b006{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:46:32,836 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5960bf29{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:46:32,930 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@21e6ff2c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/java.io.tmpdir/jetty-localhost-44045-hadoop-hdfs-3_4_1-tests_jar-_-any-18232474603764456410/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:46:32,930 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@36974255{HTTP/1.1, (http/1.1)}{localhost:44045} 2024-11-11T21:46:32,930 INFO [Time-limited test {}] server.Server(415): Started @167330ms 2024-11-11T21:46:32,932 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T21:46:32,953 WARN [ResponseProcessor for block BP-2083987259-172.17.0.2-1731361579165:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2083987259-172.17.0.2-1731361579165:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:32,953 WARN [ResponseProcessor for block BP-2083987259-172.17.0.2-1731361579165:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2083987259-172.17.0.2-1731361579165:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:32,953 WARN [ResponseProcessor for block BP-2083987259-172.17.0.2-1731361579165:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2083987259-172.17.0.2-1731361579165:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:32,954 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1743487700_22 at /127.0.0.1:58600 [Receiving block BP-2083987259-172.17.0.2-1731361579165:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37601:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58600 dst: /127.0.0.1:37601 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T21:46:32,954 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_869443960_22 at /127.0.0.1:58628 [Receiving block BP-2083987259-172.17.0.2-1731361579165:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37601:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58628 dst: /127.0.0.1:37601 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:32,955 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1743487700_22 at /127.0.0.1:58602 [Receiving block BP-2083987259-172.17.0.2-1731361579165:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37601:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58602 dst: /127.0.0.1:37601 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:32,958 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@482b9b0f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:46:32,958 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2b6caabd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:46:32,958 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:46:32,958 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25f63c50{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:46:32,958 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1e3b8424{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/hadoop.log.dir/,STOPPED} 2024-11-11T21:46:32,960 WARN [BP-2083987259-172.17.0.2-1731361579165 heartbeating to localhost/127.0.0.1:34237 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T21:46:32,960 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T21:46:32,960 WARN [BP-2083987259-172.17.0.2-1731361579165 heartbeating to localhost/127.0.0.1:34237 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2083987259-172.17.0.2-1731361579165 (Datanode Uuid 6a6f40a7-2edc-45d9-b369-02cae246d62a) service to localhost/127.0.0.1:34237
2024-11-11T21:46:32,960 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-11T21:46:32,961 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/cluster_60d9d42a-cfe0-df6b-27e0-bb097ba8c11a/data/data1/current/BP-2083987259-172.17.0.2-1731361579165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T21:46:32,961 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/cluster_60d9d42a-cfe0-df6b-27e0-bb097ba8c11a/data/data2/current/BP-2083987259-172.17.0.2-1731361579165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T21:46:32,961 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-11T21:46:32,976 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T21:46:32,981 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-11T21:46:32,982 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-11T21:46:32,982 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-11T21:46:32,982 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-11T21:46:32,982 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ad6e154{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/hadoop.log.dir/,AVAILABLE}
2024-11-11T21:46:32,983 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@176f5faa{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-11T21:46:33,018 WARN [Thread-1337 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1 2024-11-11T21:46:33,020 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2db351fb11bb5017 with lease ID 0x69ad2e16e342285c: from storage DS-ecec349a-94e7-415a-8c65-f68821dff2b1 node DatanodeRegistration(127.0.0.1:33219, datanodeUuid=0de43f7f-7633-4233-b6cd-9dcd438b25bc, infoPort=42395, infoSecurePort=0, ipcPort=45841, storageInfo=lv=-57;cid=testClusterID;nsid=1672974333;c=1731361579165), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:46:33,020 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2db351fb11bb5017 with lease ID 0x69ad2e16e342285c: from storage DS-b5acc3a9-e542-4229-b6d1-b0871983bb12 node DatanodeRegistration(127.0.0.1:33219, datanodeUuid=0de43f7f-7633-4233-b6cd-9dcd438b25bc, infoPort=42395, infoSecurePort=0, ipcPort=45841, storageInfo=lv=-57;cid=testClusterID;nsid=1672974333;c=1731361579165), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:46:33,092 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@d1373c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/java.io.tmpdir/jetty-localhost-36581-hadoop-hdfs-3_4_1-tests_jar-_-any-6833891778745755005/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:46:33,093 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43a7cd{HTTP/1.1, (http/1.1)}{localhost:36581} 2024-11-11T21:46:33,093 INFO [Time-limited test {}] server.Server(415): Started @167492ms 2024-11-11T21:46:33,094 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T21:46:33,220 WARN [Thread-1368 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T21:46:33,223 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8806f2499c4ccedd with lease ID 0x69ad2e16e342285d: from storage DS-5ff179da-ea95-46a2-8236-cad9b6cb1965 node DatanodeRegistration(127.0.0.1:43501, datanodeUuid=6a6f40a7-2edc-45d9-b369-02cae246d62a, infoPort=43779, infoSecurePort=0, ipcPort=32869, storageInfo=lv=-57;cid=testClusterID;nsid=1672974333;c=1731361579165), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:46:33,223 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8806f2499c4ccedd with lease ID 0x69ad2e16e342285d: from storage DS-bd3615d3-a419-4744-8bce-9147d0167f89 node DatanodeRegistration(127.0.0.1:43501, datanodeUuid=6a6f40a7-2edc-45d9-b369-02cae246d62a, infoPort=43779, infoSecurePort=0, ipcPort=32869, storageInfo=lv=-57;cid=testClusterID;nsid=1672974333;c=1731361579165), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:46:33,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:46:33,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:33,783 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:34,170 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-11T21:46:34,175 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-11T21:46:34,178 ERROR [FSHLog-0-hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9-prefix:22a1f13b90b0,43249,1731361579783 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37601,DS-5ff179da-ea95-46a2-8236-cad9b6cb1965,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:34,179 WARN [FSHLog-0-hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9-prefix:22a1f13b90b0,43249,1731361579783 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37601,DS-5ff179da-ea95-46a2-8236-cad9b6cb1965,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T21:46:34,179 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 22a1f13b90b0%2C43249%2C1731361579783:(num 1731361580179) roll requested
2024-11-11T21:46:34,180 INFO [regionserver/22a1f13b90b0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C43249%2C1731361579783.1731361594179
2024-11-11T21:46:34,186 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361580179 newFile=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361594179
2024-11-11T21:46:34,186 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T21:46:34,187 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T21:46:34,187 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T21:46:34,187 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T21:46:34,187 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T21:46:34,187 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361580179 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361594179
2024-11-11T21:46:34,187 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37601,DS-5ff179da-ea95-46a2-8236-cad9b6cb1965,DISK]] are bad. Aborting...
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-11T21:46:34,188 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37601,DS-5ff179da-ea95-46a2-8236-cad9b6cb1965,DISK]] are bad. Aborting...
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-11T21:46:34,188 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361580179 2024-11-11T21:46:34,188 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43779:43779),(127.0.0.1/127.0.0.1:42395:42395)] 2024-11-11T21:46:34,188 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361580179 is not closed yet, will try archiving it next time 2024-11-11T21:46:34,188 WARN [IPC Server handler 2 on default port 34237 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361580179 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-11T21:46:34,189 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361580179 after 0ms 2024-11-11T21:46:34,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:34,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:34,784 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:35,023 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-11T21:46:35,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:35,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:35,784 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:46:36,195 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-11T21:46:36,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:36,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:36,786 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:37,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:37,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:37,787 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:38,190 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361580179 after 4002ms 2024-11-11T21:46:38,200 WARN [ResponseProcessor for block BP-2083987259-172.17.0.2-1731361579165:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2083987259-172.17.0.2-1731361579165:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:38,201 WARN [DataStreamer for file /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361594179 block BP-2083987259-172.17.0.2-1731361579165:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2083987259-172.17.0.2-1731361579165:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43501,DS-5ff179da-ea95-46a2-8236-cad9b6cb1965,DISK], DatanodeInfoWithStorage[127.0.0.1:33219,DS-ecec349a-94e7-415a-8c65-f68821dff2b1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43501,DS-5ff179da-ea95-46a2-8236-cad9b6cb1965,DISK]) is bad. 2024-11-11T21:46:38,202 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1743487700_22 at /127.0.0.1:60720 [Receiving block BP-2083987259-172.17.0.2-1731361579165:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43501:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60720 dst: /127.0.0.1:43501 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:38,203 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1743487700_22 at /127.0.0.1:43754 [Receiving block BP-2083987259-172.17.0.2-1731361579165:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:33219:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43754 dst: /127.0.0.1:33219 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T21:46:38,208 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@d1373c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:46:38,209 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43a7cd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:46:38,209 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:46:38,210 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@176f5faa{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:46:38,210 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ad6e154{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/hadoop.log.dir/,STOPPED} 2024-11-11T21:46:38,211 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T21:46:38,211 WARN [BP-2083987259-172.17.0.2-1731361579165 heartbeating to localhost/127.0.0.1:34237 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T21:46:38,211 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T21:46:38,211 WARN [BP-2083987259-172.17.0.2-1731361579165 heartbeating to localhost/127.0.0.1:34237 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2083987259-172.17.0.2-1731361579165 (Datanode Uuid 6a6f40a7-2edc-45d9-b369-02cae246d62a) service to localhost/127.0.0.1:34237 2024-11-11T21:46:38,212 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/cluster_60d9d42a-cfe0-df6b-27e0-bb097ba8c11a/data/data1/current/BP-2083987259-172.17.0.2-1731361579165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:46:38,213 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/cluster_60d9d42a-cfe0-df6b-27e0-bb097ba8c11a/data/data2/current/BP-2083987259-172.17.0.2-1731361579165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:46:38,213 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T21:46:38,219 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:46:38,223 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:46:38,224 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:46:38,224 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:46:38,224 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T21:46:38,224 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e979747{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:46:38,224 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@718ea2f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:46:38,316 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35808fda{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/java.io.tmpdir/jetty-localhost-32811-hadoop-hdfs-3_4_1-tests_jar-_-any-17926869108083202830/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:46:38,316 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@57d5f4b3{HTTP/1.1, (http/1.1)}{localhost:32811} 2024-11-11T21:46:38,316 INFO [Time-limited test {}] server.Server(415): Started @172716ms 2024-11-11T21:46:38,317 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T21:46:38,334 WARN [ResponseProcessor for block BP-2083987259-172.17.0.2-1731361579165:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2083987259-172.17.0.2-1731361579165:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:38,334 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1743487700_22 at /127.0.0.1:43770 [Receiving block BP-2083987259-172.17.0.2-1731361579165:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:33219:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43770 dst: /127.0.0.1:33219 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:38,338 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@21e6ff2c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:46:38,338 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@36974255{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:46:38,338 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:46:38,338 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5960bf29{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:46:38,338 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@bd6b006{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/hadoop.log.dir/,STOPPED} 2024-11-11T21:46:38,339 WARN [BP-2083987259-172.17.0.2-1731361579165 heartbeating to localhost/127.0.0.1:34237 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T21:46:38,339 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T21:46:38,339 WARN [BP-2083987259-172.17.0.2-1731361579165 heartbeating to localhost/127.0.0.1:34237 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2083987259-172.17.0.2-1731361579165 (Datanode Uuid 0de43f7f-7633-4233-b6cd-9dcd438b25bc) service to localhost/127.0.0.1:34237 2024-11-11T21:46:38,339 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T21:46:38,340 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/cluster_60d9d42a-cfe0-df6b-27e0-bb097ba8c11a/data/data3/current/BP-2083987259-172.17.0.2-1731361579165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:46:38,340 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/cluster_60d9d42a-cfe0-df6b-27e0-bb097ba8c11a/data/data4/current/BP-2083987259-172.17.0.2-1731361579165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:46:38,340 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T21:46:38,353 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:46:38,357 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:46:38,362 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:46:38,362 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:46:38,362 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T21:46:38,362 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4022a798{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:46:38,363 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@760c54cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:46:38,392 WARN [Thread-1411 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T21:46:38,394 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1523d83ae7b621a6 with lease ID 0x69ad2e16e342285e: from storage DS-5ff179da-ea95-46a2-8236-cad9b6cb1965 node DatanodeRegistration(127.0.0.1:45675, datanodeUuid=6a6f40a7-2edc-45d9-b369-02cae246d62a, infoPort=45237, infoSecurePort=0, ipcPort=44787, storageInfo=lv=-57;cid=testClusterID;nsid=1672974333;c=1731361579165), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:46:38,395 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1523d83ae7b621a6 with lease ID 0x69ad2e16e342285e: from storage DS-bd3615d3-a419-4744-8bce-9147d0167f89 node DatanodeRegistration(127.0.0.1:45675, datanodeUuid=6a6f40a7-2edc-45d9-b369-02cae246d62a, infoPort=45237, infoSecurePort=0, ipcPort=44787, storageInfo=lv=-57;cid=testClusterID;nsid=1672974333;c=1731361579165), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:46:38,462 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3e29ac6f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/java.io.tmpdir/jetty-localhost-35607-hadoop-hdfs-3_4_1-tests_jar-_-any-16538711376041489565/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:46:38,462 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64685bd7{HTTP/1.1, (http/1.1)}{localhost:35607} 2024-11-11T21:46:38,462 INFO [Time-limited test {}] server.Server(415): Started @172861ms 2024-11-11T21:46:38,464 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T21:46:38,530 WARN [Thread-1442 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T21:46:38,532 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f7a4a8936b7740f with lease ID 0x69ad2e16e342285f: from storage DS-ecec349a-94e7-415a-8c65-f68821dff2b1 node DatanodeRegistration(127.0.0.1:44445, datanodeUuid=0de43f7f-7633-4233-b6cd-9dcd438b25bc, infoPort=35227, infoSecurePort=0, ipcPort=46425, storageInfo=lv=-57;cid=testClusterID;nsid=1672974333;c=1731361579165), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:46:38,533 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f7a4a8936b7740f with lease ID 0x69ad2e16e342285f: from storage DS-b5acc3a9-e542-4229-b6d1-b0871983bb12 node DatanodeRegistration(127.0.0.1:44445, datanodeUuid=0de43f7f-7633-4233-b6cd-9dcd438b25bc, infoPort=35227, infoSecurePort=0, ipcPort=46425, storageInfo=lv=-57;cid=testClusterID;nsid=1672974333;c=1731361579165), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:46:38,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:46:38,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:38,788 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:39,482 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-11T21:46:39,487 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-11T21:46:39,490 ERROR [FSHLog-0-hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9-prefix:22a1f13b90b0,43249,1731361579783 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33219,DS-ecec349a-94e7-415a-8c65-f68821dff2b1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:39,490 WARN [FSHLog-0-hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9-prefix:22a1f13b90b0,43249,1731361579783 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33219,DS-ecec349a-94e7-415a-8c65-f68821dff2b1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T21:46:39,490 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 22a1f13b90b0%2C43249%2C1731361579783:(num 1731361594179) roll requested 2024-11-11T21:46:39,491 INFO [regionserver/22a1f13b90b0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C43249%2C1731361579783.1731361599490 2024-11-11T21:46:39,498 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361594179 newFile=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361599490 2024-11-11T21:46:39,499 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:39,499 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:39,499 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:39,499 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:39,499 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:39,499 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361594179 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361599490 2024-11-11T21:46:39,499 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33219,DS-ecec349a-94e7-415a-8c65-f68821dff2b1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:39,500 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33219,DS-ecec349a-94e7-415a-8c65-f68821dff2b1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T21:46:39,500 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361594179 2024-11-11T21:46:39,500 WARN [IPC Server handler 0 on default port 34237 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361594179 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-11T21:46:39,501 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361594179 after 0ms 2024-11-11T21:46:39,501 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35227:35227),(127.0.0.1/127.0.0.1:45237:45237)] 2024-11-11T21:46:39,501 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361594179 is not closed yet, will try archiving it next time 2024-11-11T21:46:39,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:39,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:39,789 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:40,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:40,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:40,790 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:41,503 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C43249%2C1731361579783.1731361601502 2024-11-11T21:46:41,515 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361599490 newFile=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361601502 2024-11-11T21:46:41,515 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:41,516 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:41,516 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:41,516 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:41,516 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:41,516 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361599490 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361601502 2024-11-11T21:46:41,518 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35227:35227),(127.0.0.1/127.0.0.1:45237:45237)] 2024-11-11T21:46:41,518 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361594179 is not closed yet, will try archiving it next time 2024-11-11T21:46:41,518 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361599490 is not closed yet, will try archiving it next time 2024-11-11T21:46:41,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45675 is added to blk_1073741838_1019 (size=1264) 2024-11-11T21:46:41,519 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361580179 2024-11-11T21:46:41,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_1073741838_1019 (size=1264) 2024-11-11T21:46:41,519 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361580179 2024-11-11T21:46:41,519 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361580179 after 0ms 2024-11-11T21:46:41,519 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361580179 2024-11-11T21:46:41,519 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361594179 is not closed yet, will try archiving it next time 2024-11-11T21:46:41,529 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731361581080/Put/vlen=218/seqid=0] 2024-11-11T21:46:41,530 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731361590807/Put/vlen=1045/seqid=0] 2024-11-11T21:46:41,530 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361580179 2024-11-11T21:46:41,530 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361594179 2024-11-11T21:46:41,530 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361594179 2024-11-11T21:46:41,530 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361594179 after 0ms 2024-11-11T21:46:41,530 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361594179 2024-11-11T21:46:41,534 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731361594178/Put/vlen=1045/seqid=0] 2024-11-11T21:46:41,534 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731361596197/Put/vlen=1045/seqid=0] 2024-11-11T21:46:41,535 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361594179 2024-11-11T21:46:41,535 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361599490 2024-11-11T21:46:41,535 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361599490 2024-11-11T21:46:41,535 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361599490 after 0ms 2024-11-11T21:46:41,535 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361599490 2024-11-11T21:46:41,538 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731361599489/Put/vlen=1045/seqid=0] 2024-11-11T21:46:41,538 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361601502 2024-11-11T21:46:41,538 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361601502 2024-11-11T21:46:41,539 WARN [IPC Server handler 1 on default port 34237 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361601502 has not been closed. Lease recovery is in progress. 
RecoveryId = 1022 for block blk_1073741839_1021 2024-11-11T21:46:41,539 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361601502 after 1ms 2024-11-11T21:46:41,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:41,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:41,792 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:42,537 WARN [ResponseProcessor for block BP-2083987259-172.17.0.2-1731361579165:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2083987259-172.17.0.2-1731361579165:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:42,537 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_869443960_22 at /127.0.0.1:49358 [Receiving block BP-2083987259-172.17.0.2-1731361579165:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:44445:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49358 dst: /127.0.0.1:44445 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:44445 remote=/127.0.0.1:49358]. Total timeout mills is 60000, 58978 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:42,538 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_869443960_22 at /127.0.0.1:34560 [Receiving block BP-2083987259-172.17.0.2-1731361579165:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:45675:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34560 dst: /127.0.0.1:45675 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:46:42,538 WARN [DataStreamer for file /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361601502 block BP-2083987259-172.17.0.2-1731361579165:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2083987259-172.17.0.2-1731361579165:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44445,DS-ecec349a-94e7-415a-8c65-f68821dff2b1,DISK], DatanodeInfoWithStorage[127.0.0.1:45675,DS-5ff179da-ea95-46a2-8236-cad9b6cb1965,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44445,DS-ecec349a-94e7-415a-8c65-f68821dff2b1,DISK]) is bad. 
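The recurring "Failed invocation ... java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" warnings in this log come from RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed through reflection (the trace shows Method.invoke directly above RecoverLeaseFSUtils.isFileClosed) against a DFS client that has already been shut down, so the real IOException is surfaced wrapped in an InvocationTargetException. The sketch below shows only that reflective pattern in isolation; the class name IsFileClosedViaReflection and the error handling are illustrative assumptions, not the actual HBase code.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: call FileSystem#isFileClosed reflectively, as the stack
// trace above implies, so file systems without the method can still be handled.
final class IsFileClosedViaReflection {
  static boolean isFileClosed(FileSystem fs, Path path) throws IOException {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // file system does not expose a usable isFileClosed
    } catch (InvocationTargetException e) {
      // The real failure (e.g. "Filesystem closed") is the wrapped cause.
      Throwable cause = e.getTargetException();
      if (cause instanceof IOException) {
        throw (IOException) cause;
      }
      throw new IOException(cause);
    }
  }
}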
2024-11-11T21:46:42,541 WARN [DataStreamer for file /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361601502 block BP-2083987259-172.17.0.2-1731361579165:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2083987259-172.17.0.2-1731361579165:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T21:46:42,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_1073741839_1022 (size=85) 2024-11-11T21:46:42,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45675 is added to blk_1073741839_1022 (size=85) 2024-11-11T21:46:42,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:42,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:42,793 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:43,397 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-11T21:46:43,503 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361594179 after 4002ms 2024-11-11T21:46:43,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:43,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:46:43,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:44,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:44,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:46:44,795 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:45,541 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361601502 after 4002ms 2024-11-11T21:46:45,541 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361601502 2024-11-11T21:46:45,549 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361601502 2024-11-11T21:46:45,549 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing bf1e6c0d21761d49775a874935def3d2 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-11T21:46:45,550 ERROR [FSHLog-0-hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9-prefix:22a1f13b90b0,43249,1731361579783 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2083987259-172.17.0.2-1731361579165:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
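The lease-recovery lines earlier in the log ("Failed to recover lease, attempt=0 ... after 1ms" followed later by "Recovered lease, attempt=1 ... after 4002ms") reflect the usual polling pattern around DistributedFileSystem.recoverLease: the first call only asks the NameNode to start recovery and returns false while the block is still under recovery, and a later attempt returns true once the file is closed. A minimal sketch of that pattern is below; the class name, timeout, and pause values are made up for the example and are not taken from the log or from the HBase implementation.

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative only: poll recoverLease until the NameNode reports the file closed.
final class LeaseRecoveryExample {
  static boolean recoverLease(FileSystem fs, Path path, long timeoutMs)
      throws IOException, InterruptedException {
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // nothing to recover on non-HDFS file systems
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;
    int attempt = 0;
    while (System.currentTimeMillis() < deadline) {
      // Returns true once the file is closed and its lease has been released.
      if (dfs.recoverLease(path)) {
        System.out.println("Recovered lease, attempt=" + attempt);
        return true;
      }
      attempt++;
      Thread.sleep(1000L); // arbitrary pause between attempts
    }
    return false;
  }
}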
2024-11-11T21:46:45,550 WARN [FSHLog-0-hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9-prefix:22a1f13b90b0,43249,1731361579783 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2083987259-172.17.0.2-1731361579165:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
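The "Unexpected BlockUCState" failure above reaches the client as an org.apache.hadoop.ipc.RemoteException that carries the class name of the exception raised inside the NameNode RPC handler. A hedged sketch of how calling code can inspect such an error; the helper class is illustrative and not part of the test:

```java
import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;

/** Illustrative helper: report what a RemoteException like the one above carries. */
public final class RemoteErrorInspector {
  private RemoteErrorInspector() {}

  public static void describe(IOException e) {
    if (e instanceof RemoteException) {
      RemoteException re = (RemoteException) e;
      // Fully qualified name of the exception thrown inside the NameNode RPC handler.
      System.out.println("server-side class: " + re.getClassName());
      // Attempts to re-instantiate the server-side exception locally when that class
      // is on the classpath; otherwise the RemoteException itself comes back.
      IOException unwrapped = re.unwrapRemoteException();
      System.out.println("unwrapped as: " + unwrapped.getClass().getName());
    } else {
      System.out.println("local I/O error: " + e);
    }
  }
}
```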
2024-11-11T21:46:45,550 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 22a1f13b90b0%2C43249%2C1731361579783:(num 1731361601502) roll requested 2024-11-11T21:46:45,551 INFO [regionserver/22a1f13b90b0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C43249%2C1731361579783.1731361605551 2024-11-11T21:46:45,556 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361601502 newFile=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361605551 2024-11-11T21:46:45,556 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:45,556 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:45,556 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:45,556 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:45,557 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:45,557 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361601502 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361605551 2024-11-11T21:46:45,557 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2083987259-172.17.0.2-1731361579165:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:45,557 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2083987259-172.17.0.2-1731361579165:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:45,557 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361601502 2024-11-11T21:46:45,558 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35227:35227),(127.0.0.1/127.0.0.1:45237:45237)] 2024-11-11T21:46:45,558 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361601502 is not closed yet, will try archiving it next time 2024-11-11T21:46:45,558 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361601502 after 1ms 2024-11-11T21:46:45,559 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.1731361601502 to hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/oldWALs/22a1f13b90b0%2C43249%2C1731361579783.1731361601502 2024-11-11T21:46:45,577 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/default/TestLogRolling-testLogRollOnPipelineRestart/bf1e6c0d21761d49775a874935def3d2/.tmp/info/cc53c7daad474ee49decacd612c36c6c is 1080, key is row1002/info:/1731361590807/Put/seqid=0 2024-11-11T21:46:45,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45675 is added to blk_1073741841_1024 (size=9270) 2024-11-11T21:46:45,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_1073741841_1024 (size=9270) 2024-11-11T21:46:45,582 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), 
to=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/default/TestLogRolling-testLogRollOnPipelineRestart/bf1e6c0d21761d49775a874935def3d2/.tmp/info/cc53c7daad474ee49decacd612c36c6c 2024-11-11T21:46:45,587 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/default/TestLogRolling-testLogRollOnPipelineRestart/bf1e6c0d21761d49775a874935def3d2/.tmp/info/cc53c7daad474ee49decacd612c36c6c as hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/default/TestLogRolling-testLogRollOnPipelineRestart/bf1e6c0d21761d49775a874935def3d2/info/cc53c7daad474ee49decacd612c36c6c 2024-11-11T21:46:45,593 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/default/TestLogRolling-testLogRollOnPipelineRestart/bf1e6c0d21761d49775a874935def3d2/info/cc53c7daad474ee49decacd612c36c6c, entries=4, sequenceid=8, filesize=9.1 K 2024-11-11T21:46:45,595 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for bf1e6c0d21761d49775a874935def3d2 in 46ms, sequenceid=8, compaction requested=false 2024-11-11T21:46:45,595 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for bf1e6c0d21761d49775a874935def3d2: 2024-11-11T21:46:45,595 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-11T21:46:45,595 ERROR [FSHLog-0-hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9-prefix:22a1f13b90b0,43249,1731361579783.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37601,DS-5ff179da-ea95-46a2-8236-cad9b6cb1965,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:45,595 WARN [FSHLog-0-hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9-prefix:22a1f13b90b0,43249,1731361579783.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37601,DS-5ff179da-ea95-46a2-8236-cad9b6cb1965,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:45,595 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 22a1f13b90b0%2C43249%2C1731361579783.meta:.meta(num 1731361580542) roll requested 2024-11-11T21:46:45,596 INFO [regionserver/22a1f13b90b0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C43249%2C1731361579783.meta.1731361605596.meta 2024-11-11T21:46:45,601 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:45,601 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:45,601 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:45,601 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:45,601 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:45,601 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.meta.1731361580542.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.meta.1731361605596.meta 2024-11-11T21:46:45,601 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37601,DS-5ff179da-ea95-46a2-8236-cad9b6cb1965,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:45,602 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37601,DS-5ff179da-ea95-46a2-8236-cad9b6cb1965,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
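After the writer close fails, the Close-WAL-Writer thread asks the NameNode to recover the lease on the old WAL file (the "Recover lease on dfs file" records nearby) and RecoverLeaseFSUtils then polls whether the file has been closed, which is the isFileClosed call visible in the reflective frames. A minimal sketch of that recover-then-poll pattern against plain HDFS client APIs; the timeout, poll interval, NameNode address, and path handling are illustrative and simplify what RecoverLeaseFSUtils actually does:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/** Sketch of the recover-lease-then-poll-isFileClosed pattern seen in the log. */
public final class LeaseRecoverySketch {

  /** Returns true once the file is closed; false if the deadline passes first. */
  public static boolean recover(DistributedFileSystem dfs, Path wal) throws Exception {
    boolean closed = dfs.recoverLease(wal);                  // ask the NameNode to start lease recovery
    long deadline = System.currentTimeMillis() + 60_000L;    // illustrative 60s budget
    while (!closed && System.currentTimeMillis() < deadline) {
      Thread.sleep(1_000L);                                  // wait between attempts, like attempt=0,1,... above
      closed = dfs.isFileClosed(wal);                        // the call appearing in the reflective frames
    }
    return closed;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode URI is a placeholder; pass the WAL path printed in the log as the argument.
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34237/"), conf)) {
      Path wal = new Path(args[0]);
      System.out.println("closed=" + recover((DistributedFileSystem) fs, wal));
    }
  }
}
```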
2024-11-11T21:46:45,602 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.meta.1731361580542.meta 2024-11-11T21:46:45,602 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45237:45237),(127.0.0.1/127.0.0.1:35227:35227)] 2024-11-11T21:46:45,602 DEBUG [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.meta.1731361580542.meta is not closed yet, will try archiving it next time 2024-11-11T21:46:45,602 WARN [IPC Server handler 3 on default port 34237 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.meta.1731361580542.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1013 2024-11-11T21:46:45,602 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.meta.1731361580542.meta after 0ms 2024-11-11T21:46:45,616 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740/.tmp/info/0cd72c6562ea48d59898ea95147b9356 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2./info:regioninfo/1731361581086/Put/seqid=0 2024-11-11T21:46:45,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45675 is added to blk_1073741843_1027 (size=7125) 2024-11-11T21:46:45,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_1073741843_1027 (size=7125) 2024-11-11T21:46:45,621 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740/.tmp/info/0cd72c6562ea48d59898ea95147b9356 2024-11-11T21:46:45,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:45,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:45,639 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740/.tmp/ns/4f0f9fa24a4f4da8bcf35e52503f156d is 43, key is default/ns:d/1731361580584/Put/seqid=0 2024-11-11T21:46:45,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_1073741844_1028 (size=5153) 2024-11-11T21:46:45,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45675 is added to blk_1073741844_1028 (size=5153) 2024-11-11T21:46:45,644 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740/.tmp/ns/4f0f9fa24a4f4da8bcf35e52503f156d 2024-11-11T21:46:45,662 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740/.tmp/table/175d025e24834de480b08dcdf93ac977 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731361581097/Put/seqid=0 2024-11-11T21:46:45,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_1073741845_1029 (size=5438) 2024-11-11T21:46:45,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45675 is added to blk_1073741845_1029 (size=5438) 2024-11-11T21:46:45,667 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740/.tmp/table/175d025e24834de480b08dcdf93ac977 2024-11-11T21:46:45,673 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740/.tmp/info/0cd72c6562ea48d59898ea95147b9356 as hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740/info/0cd72c6562ea48d59898ea95147b9356 2024-11-11T21:46:45,679 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740/info/0cd72c6562ea48d59898ea95147b9356, entries=10, sequenceid=11, filesize=7.0 K 2024-11-11T21:46:45,680 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740/.tmp/ns/4f0f9fa24a4f4da8bcf35e52503f156d as 
hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740/ns/4f0f9fa24a4f4da8bcf35e52503f156d 2024-11-11T21:46:45,685 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740/ns/4f0f9fa24a4f4da8bcf35e52503f156d, entries=2, sequenceid=11, filesize=5.0 K 2024-11-11T21:46:45,686 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740/.tmp/table/175d025e24834de480b08dcdf93ac977 as hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740/table/175d025e24834de480b08dcdf93ac977 2024-11-11T21:46:45,691 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740/table/175d025e24834de480b08dcdf93ac977, entries=2, sequenceid=11, filesize=5.3 K 2024-11-11T21:46:45,693 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 97ms, sequenceid=11, compaction requested=false 2024-11-11T21:46:45,693 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-11T21:46:45,697 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T21:46:45,698 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T21:46:45,698 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at 
org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:46:45,698 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:46:45,698 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:46:45,698 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-11T21:46:45,698 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T21:46:45,698 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1951379805, stopped=false 2024-11-11T21:46:45,698 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=22a1f13b90b0,45543,1731361579744 2024-11-11T21:46:45,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T21:46:45,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43249-0x10030c07f3d0001, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T21:46:45,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:45,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43249-0x10030c07f3d0001, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:45,699 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T21:46:45,700 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
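The call stacks above come from the test's @After hook: AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection and then stops the master and region server whose shutdown messages follow. A bare-bones JUnit 4 harness showing that lifecycle; the class name and the empty test body are illustrative, not the actual test's setup:

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

/** Illustrative JUnit 4 lifecycle matching the tearDown call stack in the log. */
public class MiniClusterLifecycleSketch {

  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    util.startMiniCluster();        // starts a single master and region server by default
  }

  @Test
  public void doesNothing() {
    // test body elided; the point is the setup/teardown pairing
  }

  @After
  public void tearDown() throws Exception {
    util.shutdownMiniCluster();     // the shutdownMiniCluster call in the stack above
  }
}
```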
2024-11-11T21:46:45,700 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:46:45,700 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:46:45,700 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:46:45,700 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '22a1f13b90b0,43249,1731361579783' ***** 2024-11-11T21:46:45,700 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T21:46:45,700 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43249-0x10030c07f3d0001, quorum=127.0.0.1:64425, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:46:45,700 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T21:46:45,700 INFO [RS:0;22a1f13b90b0:43249 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T21:46:45,700 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T21:46:45,700 INFO [RS:0;22a1f13b90b0:43249 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T21:46:45,700 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.HRegionServer(3091): Received CLOSE for bf1e6c0d21761d49775a874935def3d2 2024-11-11T21:46:45,701 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.HRegionServer(959): stopping server 22a1f13b90b0,43249,1731361579783 2024-11-11T21:46:45,701 INFO [RS:0;22a1f13b90b0:43249 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T21:46:45,701 INFO [RS:0;22a1f13b90b0:43249 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;22a1f13b90b0:43249. 2024-11-11T21:46:45,701 DEBUG [RS:0;22a1f13b90b0:43249 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:46:45,701 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing bf1e6c0d21761d49775a874935def3d2, disabling compactions & flushes 2024-11-11T21:46:45,701 DEBUG [RS:0;22a1f13b90b0:43249 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:46:45,701 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2. 2024-11-11T21:46:45,701 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T21:46:45,701 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2. 2024-11-11T21:46:45,701 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T21:46:45,701 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T21:46:45,701 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2. after waiting 0 ms 2024-11-11T21:46:45,701 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2. 2024-11-11T21:46:45,701 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T21:46:45,701 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-11T21:46:45,701 DEBUG [RS:0;22a1f13b90b0:43249 {}] regionserver.HRegionServer(1325): Online Regions={bf1e6c0d21761d49775a874935def3d2=TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2., 1588230740=hbase:meta,,1.1588230740} 2024-11-11T21:46:45,701 DEBUG [RS:0;22a1f13b90b0:43249 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, bf1e6c0d21761d49775a874935def3d2 2024-11-11T21:46:45,701 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T21:46:45,701 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T21:46:45,701 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T21:46:45,701 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T21:46:45,701 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T21:46:45,707 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-11T21:46:45,707 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/data/default/TestLogRolling-testLogRollOnPipelineRestart/bf1e6c0d21761d49775a874935def3d2/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-11T21:46:45,707 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2. 2024-11-11T21:46:45,707 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T21:46:45,707 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for bf1e6c0d21761d49775a874935def3d2: Waiting for close lock at 1731361605701Running coprocessor pre-close hooks at 1731361605701Disabling compacts and flushes for region at 1731361605701Disabling writes for close at 1731361605701Writing region close event to WAL at 1731361605703 (+2 ms)Running coprocessor post-close hooks at 1731361605707 (+4 ms)Closed at 1731361605707 2024-11-11T21:46:45,707 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T21:46:45,708 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731361605701Running coprocessor pre-close hooks at 1731361605701Disabling compacts and flushes for region at 1731361605701Disabling writes for close at 1731361605701Writing region close event to WAL at 1731361605704 (+3 ms)Running coprocessor post-close hooks at 1731361605707 (+3 ms)Closed at 1731361605707 2024-11-11T21:46:45,708 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T21:46:45,708 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731361580714.bf1e6c0d21761d49775a874935def3d2. 2024-11-11T21:46:45,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:45,901 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.HRegionServer(976): stopping server 22a1f13b90b0,43249,1731361579783; all regions closed. 2024-11-11T21:46:45,902 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:45,902 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:45,902 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:45,902 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:45,902 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:45,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_1073741842_1025 (size=825) 2024-11-11T21:46:45,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45675 is added to blk_1073741842_1025 (size=825) 2024-11-11T21:46:46,045 INFO [regionserver/22a1f13b90b0:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T21:46:46,109 INFO [regionserver/22a1f13b90b0:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T21:46:46,109 INFO [regionserver/22a1f13b90b0:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T21:46:46,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:46,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:46,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:47,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:47,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:47,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:48,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:48,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:48,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:49,537 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-11T21:46:49,604 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.meta.1731361580542.meta after 4002ms 2024-11-11T21:46:49,605 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/WALs/22a1f13b90b0,43249,1731361579783/22a1f13b90b0%2C43249%2C1731361579783.meta.1731361580542.meta to hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/oldWALs/22a1f13b90b0%2C43249%2C1731361579783.meta.1731361580542.meta 2024-11-11T21:46:49,614 DEBUG [RS:0;22a1f13b90b0:43249 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/oldWALs 2024-11-11T21:46:49,614 INFO [RS:0;22a1f13b90b0:43249 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 22a1f13b90b0%2C43249%2C1731361579783.meta:.meta(num 1731361605596) 2024-11-11T21:46:49,615 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:49,615 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:49,615 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:49,615 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:49,615 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:49,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45675 is added to blk_1073741840_1023 (size=1162) 2024-11-11T21:46:49,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_1073741840_1023 (size=1162) 2024-11-11T21:46:49,627 DEBUG [RS:0;22a1f13b90b0:43249 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/oldWALs 2024-11-11T21:46:49,627 INFO [RS:0;22a1f13b90b0:43249 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 22a1f13b90b0%2C43249%2C1731361579783:(num 1731361605551) 2024-11-11T21:46:49,627 DEBUG [RS:0;22a1f13b90b0:43249 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:46:49,627 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T21:46:49,627 INFO [RS:0;22a1f13b90b0:43249 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T21:46:49,627 INFO [RS:0;22a1f13b90b0:43249 {}] hbase.ChoreService(370): Chore service for: regionserver/22a1f13b90b0:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T21:46:49,627 INFO [RS:0;22a1f13b90b0:43249 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T21:46:49,628 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T21:46:49,628 INFO [RS:0;22a1f13b90b0:43249 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43249 2024-11-11T21:46:49,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43249-0x10030c07f3d0001, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/22a1f13b90b0,43249,1731361579783 2024-11-11T21:46:49,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T21:46:49,630 INFO [RS:0;22a1f13b90b0:43249 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T21:46:49,630 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [22a1f13b90b0,43249,1731361579783] 2024-11-11T21:46:49,631 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/22a1f13b90b0,43249,1731361579783 already deleted, retry=false 2024-11-11T21:46:49,631 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 22a1f13b90b0,43249,1731361579783 expired; onlineServers=0 2024-11-11T21:46:49,631 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '22a1f13b90b0,45543,1731361579744' ***** 2024-11-11T21:46:49,631 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T21:46:49,631 INFO [M:0;22a1f13b90b0:45543 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T21:46:49,631 INFO [M:0;22a1f13b90b0:45543 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T21:46:49,631 DEBUG [M:0;22a1f13b90b0:45543 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T21:46:49,632 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T21:46:49,632 DEBUG [M:0;22a1f13b90b0:45543 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T21:46:49,632 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361579921 {}] cleaner.HFileCleaner(306): Exit Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361579921,5,FailOnTimeoutGroup] 2024-11-11T21:46:49,632 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361579921 {}] cleaner.HFileCleaner(306): Exit Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361579921,5,FailOnTimeoutGroup] 2024-11-11T21:46:49,632 INFO [M:0;22a1f13b90b0:45543 {}] hbase.ChoreService(370): Chore service for: master/22a1f13b90b0:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T21:46:49,632 INFO [M:0;22a1f13b90b0:45543 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T21:46:49,632 DEBUG [M:0;22a1f13b90b0:45543 {}] master.HMaster(1795): Stopping service threads 2024-11-11T21:46:49,632 INFO [M:0;22a1f13b90b0:45543 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T21:46:49,632 INFO [M:0;22a1f13b90b0:45543 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T21:46:49,632 INFO [M:0;22a1f13b90b0:45543 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T21:46:49,632 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T21:46:49,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T21:46:49,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:49,633 DEBUG [M:0;22a1f13b90b0:45543 {}] zookeeper.ZKUtil(347): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T21:46:49,633 WARN [M:0;22a1f13b90b0:45543 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T21:46:49,633 INFO [M:0;22a1f13b90b0:45543 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/.lastflushedseqids 2024-11-11T21:46:49,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45675 is added to blk_1073741846_1030 (size=130) 2024-11-11T21:46:49,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_1073741846_1030 (size=130) 2024-11-11T21:46:49,638 INFO [M:0;22a1f13b90b0:45543 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T21:46:49,638 INFO [M:0;22a1f13b90b0:45543 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T21:46:49,639 DEBUG [M:0;22a1f13b90b0:45543 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T21:46:49,639 INFO [M:0;22a1f13b90b0:45543 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:46:49,639 DEBUG [M:0;22a1f13b90b0:45543 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:46:49,639 DEBUG [M:0;22a1f13b90b0:45543 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T21:46:49,639 DEBUG [M:0;22a1f13b90b0:45543 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:46:49,639 INFO [M:0;22a1f13b90b0:45543 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-11T21:46:49,639 ERROR [FSHLog-0-hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData-prefix:22a1f13b90b0,45543,1731361579744 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37601,DS-5ff179da-ea95-46a2-8236-cad9b6cb1965,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:49,639 WARN [FSHLog-0-hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData-prefix:22a1f13b90b0,45543,1731361579744 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37601,DS-5ff179da-ea95-46a2-8236-cad9b6cb1965,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-11T21:46:49,639 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 22a1f13b90b0%2C45543%2C1731361579744:(num 1731361579863) roll requested 2024-11-11T21:46:49,640 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C45543%2C1731361579744.1731361609639 2024-11-11T21:46:49,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:49,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:49,644 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:49,644 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:49,644 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:49,644 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:49,644 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:49,645 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/WALs/22a1f13b90b0,45543,1731361579744/22a1f13b90b0%2C45543%2C1731361579744.1731361579863 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/WALs/22a1f13b90b0,45543,1731361579744/22a1f13b90b0%2C45543%2C1731361579744.1731361609639 2024-11-11T21:46:49,645 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37601,DS-5ff179da-ea95-46a2-8236-cad9b6cb1965,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:49,645 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37601,DS-5ff179da-ea95-46a2-8236-cad9b6cb1965,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-11T21:46:49,645 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/WALs/22a1f13b90b0,45543,1731361579744/22a1f13b90b0%2C45543%2C1731361579744.1731361579863 2024-11-11T21:46:49,645 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35227:35227),(127.0.0.1/127.0.0.1:45237:45237)] 2024-11-11T21:46:49,645 WARN [IPC Server handler 1 on default port 34237 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/WALs/22a1f13b90b0,45543,1731361579744/22a1f13b90b0%2C45543%2C1731361579744.1731361579863 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-11T21:46:49,645 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/WALs/22a1f13b90b0,45543,1731361579744/22a1f13b90b0%2C45543%2C1731361579744.1731361579863 is not closed yet, will try archiving it next time 2024-11-11T21:46:49,646 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/WALs/22a1f13b90b0,45543,1731361579744/22a1f13b90b0%2C45543%2C1731361579744.1731361579863 after 1ms 2024-11-11T21:46:49,662 DEBUG [M:0;22a1f13b90b0:45543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/17cfced68aa44172a884a26d60864bf0 is 82, key is hbase:meta,,1/info:regioninfo/1731361580571/Put/seqid=0 2024-11-11T21:46:49,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_1073741848_1033 (size=5672) 2024-11-11T21:46:49,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45675 is added to blk_1073741848_1033 (size=5672) 2024-11-11T21:46:49,667 INFO [M:0;22a1f13b90b0:45543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/17cfced68aa44172a884a26d60864bf0 2024-11-11T21:46:49,686 DEBUG [M:0;22a1f13b90b0:45543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fcb7e236bb354623847f0455c40b0c38 is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731361581101/Put/seqid=0 2024-11-11T21:46:49,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_1073741849_1034 (size=6119) 2024-11-11T21:46:49,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45675 is added to blk_1073741849_1034 (size=6119) 2024-11-11T21:46:49,691 INFO [M:0;22a1f13b90b0:45543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fcb7e236bb354623847f0455c40b0c38 2024-11-11T21:46:49,710 DEBUG [M:0;22a1f13b90b0:45543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8f6dba0a108e4cd399652523739dfaf9 is 69, key is 22a1f13b90b0,43249,1731361579783/rs:state/1731361580030/Put/seqid=0 2024-11-11T21:46:49,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45675 is added to blk_1073741850_1035 (size=5156) 2024-11-11T21:46:49,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_1073741850_1035 (size=5156) 2024-11-11T21:46:49,715 INFO [M:0;22a1f13b90b0:45543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8f6dba0a108e4cd399652523739dfaf9 2024-11-11T21:46:49,724 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T21:46:49,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43249-0x10030c07f3d0001, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:46:49,731 INFO [RS:0;22a1f13b90b0:43249 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T21:46:49,731 INFO [RS:0;22a1f13b90b0:43249 {}] regionserver.HRegionServer(1031): Exiting; stopping=22a1f13b90b0,43249,1731361579783; zookeeper connection closed. 
2024-11-11T21:46:49,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43249-0x10030c07f3d0001, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:46:49,731 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@343fc60a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@343fc60a 2024-11-11T21:46:49,731 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T21:46:49,736 DEBUG [M:0;22a1f13b90b0:45543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/78f697b531c74ac9b962a47eef37ee1b is 52, key is load_balancer_on/state:d/1731361580709/Put/seqid=0 2024-11-11T21:46:49,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_1073741851_1036 (size=5056) 2024-11-11T21:46:49,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45675 is added to blk_1073741851_1036 (size=5056) 2024-11-11T21:46:49,741 INFO [M:0;22a1f13b90b0:45543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/78f697b531c74ac9b962a47eef37ee1b 2024-11-11T21:46:49,747 DEBUG [M:0;22a1f13b90b0:45543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/17cfced68aa44172a884a26d60864bf0 as hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/17cfced68aa44172a884a26d60864bf0 2024-11-11T21:46:49,752 INFO [M:0;22a1f13b90b0:45543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/17cfced68aa44172a884a26d60864bf0, entries=8, sequenceid=56, filesize=5.5 K 2024-11-11T21:46:49,754 DEBUG [M:0;22a1f13b90b0:45543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fcb7e236bb354623847f0455c40b0c38 as hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fcb7e236bb354623847f0455c40b0c38 2024-11-11T21:46:49,759 INFO [M:0;22a1f13b90b0:45543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fcb7e236bb354623847f0455c40b0c38, entries=6, sequenceid=56, filesize=6.0 K 2024-11-11T21:46:49,760 DEBUG [M:0;22a1f13b90b0:45543 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8f6dba0a108e4cd399652523739dfaf9 as hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8f6dba0a108e4cd399652523739dfaf9 2024-11-11T21:46:49,766 INFO [M:0;22a1f13b90b0:45543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8f6dba0a108e4cd399652523739dfaf9, entries=1, sequenceid=56, filesize=5.0 K 2024-11-11T21:46:49,767 DEBUG [M:0;22a1f13b90b0:45543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/78f697b531c74ac9b962a47eef37ee1b as hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/78f697b531c74ac9b962a47eef37ee1b 2024-11-11T21:46:49,771 INFO [M:0;22a1f13b90b0:45543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/78f697b531c74ac9b962a47eef37ee1b, entries=1, sequenceid=56, filesize=4.9 K 2024-11-11T21:46:49,773 INFO [M:0;22a1f13b90b0:45543 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=56, compaction requested=false 2024-11-11T21:46:49,774 INFO [M:0;22a1f13b90b0:45543 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:46:49,774 DEBUG [M:0;22a1f13b90b0:45543 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731361609639Disabling compacts and flushes for region at 1731361609639Disabling writes for close at 1731361609639Obtaining lock to block concurrent updates at 1731361609639Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731361609639Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731361609639Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731361609646 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731361609646Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731361609661 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731361609661Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731361609672 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731361609686 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731361609686Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731361609695 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731361609710 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731361609710Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731361609719 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731361609735 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731361609735Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2cdc96bd: reopening flushed file at 1731361609746 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a933eee: reopening flushed file at 1731361609752 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38b8ef51: reopening flushed file at 1731361609759 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7261aa24: reopening flushed file at 1731361609766 (+7 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=56, compaction requested=false at 1731361609773 (+7 ms)Writing region close event to WAL at 1731361609774 (+1 ms)Closed at 1731361609774 2024-11-11T21:46:49,775 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:49,775 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:49,775 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:49,775 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:49,775 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:46:49,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45675 is added to blk_1073741847_1031 (size=757) 2024-11-11T21:46:49,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44445 is added to blk_1073741847_1031 (size=757) 2024-11-11T21:46:49,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:50,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:50,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:50,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:50,710 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:50,732 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:50,732 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:50,732 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:50,732 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:50,732 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:50,732 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:50,735 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:50,736 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:50,736 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:50,737 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:50,741 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:50,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:50,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:51,246 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T21:46:51,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:51,250 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:51,250 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:51,251 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:51,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:51,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:51,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:51,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:51,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:51,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:51,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:51,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:51,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:51,276 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:51,463 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T21:46:51,464 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T21:46:51,464 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-11T21:46:51,464 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-11T21:46:51,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:51,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:51,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:52,538 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-11T21:46:52,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:52,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:52,803 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:46:53,647 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/WALs/22a1f13b90b0,45543,1731361579744/22a1f13b90b0%2C45543%2C1731361579744.1731361579863 after 4002ms 2024-11-11T21:46:53,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:53,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:53,649 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/WALs/22a1f13b90b0,45543,1731361579744/22a1f13b90b0%2C45543%2C1731361579744.1731361579863 to hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/oldWALs/22a1f13b90b0%2C45543%2C1731361579744.1731361579863 2024-11-11T21:46:53,658 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/MasterData/oldWALs/22a1f13b90b0%2C45543%2C1731361579744.1731361579863 to hdfs://localhost:34237/user/jenkins/test-data/4211e341-5f19-4353-d065-4c228e2a33f9/oldWALs/22a1f13b90b0%2C45543%2C1731361579744.1731361579863$masterlocalwal$ 2024-11-11T21:46:53,658 INFO [M:0;22a1f13b90b0:45543 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-11T21:46:53,658 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T21:46:53,658 INFO [M:0;22a1f13b90b0:45543 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45543
2024-11-11T21:46:53,659 INFO [M:0;22a1f13b90b0:45543 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-11T21:46:53,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T21:46:53,761 INFO [M:0;22a1f13b90b0:45543 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-11T21:46:53,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45543-0x10030c07f3d0000, quorum=127.0.0.1:64425, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T21:46:53,766 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3e29ac6f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T21:46:53,767 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64685bd7{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T21:46:53,767 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T21:46:53,767 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@760c54cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T21:46:53,767 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4022a798{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/hadoop.log.dir/,STOPPED}
2024-11-11T21:46:53,770 WARN [BP-2083987259-172.17.0.2-1731361579165 heartbeating to localhost/127.0.0.1:34237 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-11T21:46:53,770 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-11T21:46:53,770 WARN [BP-2083987259-172.17.0.2-1731361579165 heartbeating to localhost/127.0.0.1:34237 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2083987259-172.17.0.2-1731361579165 (Datanode Uuid 0de43f7f-7633-4233-b6cd-9dcd438b25bc) service to localhost/127.0.0.1:34237
2024-11-11T21:46:53,770 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-11T21:46:53,771 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/cluster_60d9d42a-cfe0-df6b-27e0-bb097ba8c11a/data/data3/current/BP-2083987259-172.17.0.2-1731361579165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T21:46:53,771 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/cluster_60d9d42a-cfe0-df6b-27e0-bb097ba8c11a/data/data4/current/BP-2083987259-172.17.0.2-1731361579165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T21:46:53,771 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-11T21:46:53,772 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35808fda{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T21:46:53,773 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@57d5f4b3{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T21:46:53,773 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T21:46:53,773 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@718ea2f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T21:46:53,773 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e979747{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/hadoop.log.dir/,STOPPED}
2024-11-11T21:46:53,774 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-11T21:46:53,774 WARN [BP-2083987259-172.17.0.2-1731361579165 heartbeating to localhost/127.0.0.1:34237 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-11T21:46:53,774 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-11T21:46:53,774 WARN [BP-2083987259-172.17.0.2-1731361579165 heartbeating to localhost/127.0.0.1:34237 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2083987259-172.17.0.2-1731361579165 (Datanode Uuid 6a6f40a7-2edc-45d9-b369-02cae246d62a) service to localhost/127.0.0.1:34237
2024-11-11T21:46:53,774 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/cluster_60d9d42a-cfe0-df6b-27e0-bb097ba8c11a/data/data1/current/BP-2083987259-172.17.0.2-1731361579165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T21:46:53,775 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/cluster_60d9d42a-cfe0-df6b-27e0-bb097ba8c11a/data/data2/current/BP-2083987259-172.17.0.2-1731361579165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T21:46:53,775 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-11T21:46:53,779 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@10a92d53{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-11T21:46:53,780 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ddfefdc{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T21:46:53,780 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T21:46:53,780 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7bd03e52{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T21:46:53,780 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a58a89e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/hadoop.log.dir/,STOPPED}
2024-11-11T21:46:53,787 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-11T21:46:53,804 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:53,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T21:46:53,814 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=180 (was 155) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34237 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34237 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34237 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:34237 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34237 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34237 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34237 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34237 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=457 (was 440) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=61 (was 110), ProcessCount=11 (was 11), AvailableMemoryMB=4209 (was 4453) 2024-11-11T21:46:53,820 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=180, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=61, ProcessCount=11, AvailableMemoryMB=4209 2024-11-11T21:46:53,821 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T21:46:53,821 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/hadoop.log.dir so I do NOT create it in target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9 2024-11-11T21:46:53,821 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0600b998-e857-a43e-c167-8c11ea931226/hadoop.tmp.dir so I do NOT create it in target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9 2024-11-11T21:46:53,821 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/cluster_8e92d297-9568-c07d-cc6a-c9bf617f5ee4, deleteOnExit=true 2024-11-11T21:46:53,821 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T21:46:53,821 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/test.cache.data in system properties and HBase conf 2024-11-11T21:46:53,821 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T21:46:53,821 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/hadoop.log.dir in system properties and HBase conf 2024-11-11T21:46:53,821 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T21:46:53,821 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T21:46:53,822 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T21:46:53,822 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-11T21:46:53,822 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T21:46:53,822 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T21:46:53,822 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T21:46:53,822 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T21:46:53,822 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T21:46:53,822 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T21:46:53,822 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T21:46:53,823 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T21:46:53,823 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T21:46:53,823 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/nfs.dump.dir in system properties and HBase conf 2024-11-11T21:46:53,823 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/java.io.tmpdir in system properties and HBase conf 2024-11-11T21:46:53,823 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T21:46:53,823 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T21:46:53,823 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T21:46:53,837 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T21:46:53,883 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:46:53,887 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:46:53,888 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:46:53,888 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:46:53,889 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T21:46:53,889 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:46:53,890 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a7d1da5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:46:53,890 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@438136f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:46:53,982 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@16208fe2{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/java.io.tmpdir/jetty-localhost-37709-hadoop-hdfs-3_4_1-tests_jar-_-any-963986928913239360/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T21:46:53,982 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a5372a5{HTTP/1.1, (http/1.1)}{localhost:37709} 2024-11-11T21:46:53,982 INFO [Time-limited test {}] server.Server(415): Started @188382ms 2024-11-11T21:46:53,993 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T21:46:54,028 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:46:54,031 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:46:54,032 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:46:54,032 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:46:54,032 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T21:46:54,032 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@214c2124{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:46:54,032 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f02078{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:46:54,126 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4114613b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/java.io.tmpdir/jetty-localhost-40165-hadoop-hdfs-3_4_1-tests_jar-_-any-6736528266756732707/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:46:54,127 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21edc7fd{HTTP/1.1, (http/1.1)}{localhost:40165} 2024-11-11T21:46:54,127 INFO [Time-limited test {}] server.Server(415): Started @188526ms 2024-11-11T21:46:54,128 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T21:46:54,152 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:46:54,155 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:46:54,156 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:46:54,156 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:46:54,156 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T21:46:54,156 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e470e04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:46:54,156 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4732430a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:46:54,181 WARN [Thread-1636 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/cluster_8e92d297-9568-c07d-cc6a-c9bf617f5ee4/data/data1/current/BP-614608524-172.17.0.2-1731361613846/current, will proceed with Du for space computation calculation, 2024-11-11T21:46:54,181 WARN [Thread-1637 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/cluster_8e92d297-9568-c07d-cc6a-c9bf617f5ee4/data/data2/current/BP-614608524-172.17.0.2-1731361613846/current, will proceed with Du for space computation calculation, 2024-11-11T21:46:54,198 WARN [Thread-1615 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T21:46:54,200 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x277c44184443fdb1 with lease ID 0x5b25e6717a737067: Processing first storage report for DS-afd1e3ae-5a6a-439b-b65b-b63138159ef0 from datanode DatanodeRegistration(127.0.0.1:35597, datanodeUuid=5a4c6fab-11bb-4f6b-853b-3fdc6147bdf7, infoPort=40917, infoSecurePort=0, ipcPort=36655, storageInfo=lv=-57;cid=testClusterID;nsid=1350391340;c=1731361613846) 2024-11-11T21:46:54,200 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x277c44184443fdb1 with lease ID 0x5b25e6717a737067: from storage DS-afd1e3ae-5a6a-439b-b65b-b63138159ef0 node DatanodeRegistration(127.0.0.1:35597, datanodeUuid=5a4c6fab-11bb-4f6b-853b-3fdc6147bdf7, infoPort=40917, infoSecurePort=0, ipcPort=36655, storageInfo=lv=-57;cid=testClusterID;nsid=1350391340;c=1731361613846), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:46:54,200 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x277c44184443fdb1 with lease ID 0x5b25e6717a737067: Processing first storage report for DS-e08eb2f5-c12b-4c67-888a-c5816dbd0ecd from datanode DatanodeRegistration(127.0.0.1:35597, datanodeUuid=5a4c6fab-11bb-4f6b-853b-3fdc6147bdf7, infoPort=40917, infoSecurePort=0, ipcPort=36655, storageInfo=lv=-57;cid=testClusterID;nsid=1350391340;c=1731361613846) 2024-11-11T21:46:54,201 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x277c44184443fdb1 with lease ID 0x5b25e6717a737067: from storage DS-e08eb2f5-c12b-4c67-888a-c5816dbd0ecd node DatanodeRegistration(127.0.0.1:35597, datanodeUuid=5a4c6fab-11bb-4f6b-853b-3fdc6147bdf7, infoPort=40917, infoSecurePort=0, ipcPort=36655, storageInfo=lv=-57;cid=testClusterID;nsid=1350391340;c=1731361613846), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:46:54,251 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@677a249b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/java.io.tmpdir/jetty-localhost-44783-hadoop-hdfs-3_4_1-tests_jar-_-any-15342592489436115395/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:46:54,252 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e21aaf2{HTTP/1.1, (http/1.1)}{localhost:44783} 2024-11-11T21:46:54,252 INFO [Time-limited test {}] server.Server(415): Started @188651ms 2024-11-11T21:46:54,253 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
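
The entries above show HBaseTestingUtil bringing up the standard mini cluster for regionserver.wal.TestLogRolling: one master, one region server, two HDFS datanodes and one ZooKeeper server, per the StartMiniClusterOption logged at 21:46:53,821. As a rough illustration (not the test's actual code), a test typically drives the same bootstrap like the sketch below; the builder method names are assumptions taken from the option fields in the log and from the current HBase master branch, and may differ in other HBase versions.

    // Sketch only: brings up a mini cluster matching the StartMiniClusterOption logged above.
    // HBaseTestingUtil / StartMiniClusterOption.Builder names are assumed from the log output.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)         // numMasters=1 in the logged option
            .numRegionServers(1)   // numRegionServers=1
            .numDataNodes(2)       // numDataNodes=2 (the two datanodes reported above)
            .numZkServers(1)       // numZkServers=1 (MiniZooKeeperCluster)
            .build();
        util.startMiniCluster(option);   // starts DFS, ZooKeeper, master and region server
        try {
          // ... run the test body against util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();    // tear-down; ResourceChecker then reports any leaks
        }
      }
    }

The "Potentially hanging thread" and "Thread LEAK?" blocks earlier in the log are what ResourceChecker prints when it compares thread and file-descriptor counts before and after such a start/stop cycle.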
2024-11-11T21:46:54,306 WARN [Thread-1662 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/cluster_8e92d297-9568-c07d-cc6a-c9bf617f5ee4/data/data3/current/BP-614608524-172.17.0.2-1731361613846/current, will proceed with Du for space computation calculation, 2024-11-11T21:46:54,306 WARN [Thread-1663 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/cluster_8e92d297-9568-c07d-cc6a-c9bf617f5ee4/data/data4/current/BP-614608524-172.17.0.2-1731361613846/current, will proceed with Du for space computation calculation, 2024-11-11T21:46:54,323 WARN [Thread-1651 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T21:46:54,325 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4b27be4146a7c82f with lease ID 0x5b25e6717a737068: Processing first storage report for DS-e1ecf9c3-07ff-4a50-8867-2d4b5a3a8cf0 from datanode DatanodeRegistration(127.0.0.1:42237, datanodeUuid=078222af-057b-4223-94e8-67d37ec73a4d, infoPort=37713, infoSecurePort=0, ipcPort=33445, storageInfo=lv=-57;cid=testClusterID;nsid=1350391340;c=1731361613846) 2024-11-11T21:46:54,325 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4b27be4146a7c82f with lease ID 0x5b25e6717a737068: from storage DS-e1ecf9c3-07ff-4a50-8867-2d4b5a3a8cf0 node DatanodeRegistration(127.0.0.1:42237, datanodeUuid=078222af-057b-4223-94e8-67d37ec73a4d, infoPort=37713, infoSecurePort=0, ipcPort=33445, storageInfo=lv=-57;cid=testClusterID;nsid=1350391340;c=1731361613846), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:46:54,325 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4b27be4146a7c82f with lease ID 0x5b25e6717a737068: Processing first storage report for DS-3c28bdd8-baa0-4f66-8a84-119769aac1b7 from datanode DatanodeRegistration(127.0.0.1:42237, datanodeUuid=078222af-057b-4223-94e8-67d37ec73a4d, infoPort=37713, infoSecurePort=0, ipcPort=33445, storageInfo=lv=-57;cid=testClusterID;nsid=1350391340;c=1731361613846) 2024-11-11T21:46:54,325 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4b27be4146a7c82f with lease ID 0x5b25e6717a737068: from storage DS-3c28bdd8-baa0-4f66-8a84-119769aac1b7 node DatanodeRegistration(127.0.0.1:42237, datanodeUuid=078222af-057b-4223-94e8-67d37ec73a4d, infoPort=37713, infoSecurePort=0, ipcPort=33445, storageInfo=lv=-57;cid=testClusterID;nsid=1350391340;c=1731361613846), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:46:54,377 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9 2024-11-11T21:46:54,379 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/cluster_8e92d297-9568-c07d-cc6a-c9bf617f5ee4/zookeeper_0, clientPort=56308, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/cluster_8e92d297-9568-c07d-cc6a-c9bf617f5ee4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/cluster_8e92d297-9568-c07d-cc6a-c9bf617f5ee4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T21:46:54,380 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56308 2024-11-11T21:46:54,380 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:46:54,382 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:46:54,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741825_1001 (size=7) 2024-11-11T21:46:54,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741825_1001 (size=7) 2024-11-11T21:46:54,392 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c with version=8 2024-11-11T21:46:54,392 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/hbase-staging 2024-11-11T21:46:54,394 INFO [Time-limited test {}] client.ConnectionUtils(128): master/22a1f13b90b0:0 server-side Connection retries=45 2024-11-11T21:46:54,394 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:46:54,394 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T21:46:54,394 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T21:46:54,394 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:46:54,394 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T21:46:54,395 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T21:46:54,395 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T21:46:54,395 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44805 2024-11-11T21:46:54,397 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44805 connecting to ZooKeeper ensemble=127.0.0.1:56308 2024-11-11T21:46:54,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:448050x0, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T21:46:54,401 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44805-0x10030c106990000 connected 2024-11-11T21:46:54,416 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:46:54,417 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:46:54,419 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:46:54,419 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c, hbase.cluster.distributed=false 2024-11-11T21:46:54,420 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T21:46:54,421 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44805 2024-11-11T21:46:54,421 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44805 2024-11-11T21:46:54,422 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44805 2024-11-11T21:46:54,423 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44805 2024-11-11T21:46:54,425 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44805 2024-11-11T21:46:54,438 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/22a1f13b90b0:0 server-side Connection retries=45 2024-11-11T21:46:54,438 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:46:54,438 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T21:46:54,438 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T21:46:54,438 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:46:54,438 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T21:46:54,439 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T21:46:54,439 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T21:46:54,439 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41445 2024-11-11T21:46:54,441 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41445 connecting to ZooKeeper ensemble=127.0.0.1:56308 2024-11-11T21:46:54,441 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:46:54,443 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:46:54,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:414450x0, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T21:46:54,447 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:414450x0, quorum=127.0.0.1:56308, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:46:54,447 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41445-0x10030c106990001 connected 2024-11-11T21:46:54,447 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T21:46:54,447 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T21:46:54,448 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41445-0x10030c106990001, quorum=127.0.0.1:56308, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T21:46:54,449 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41445-0x10030c106990001, quorum=127.0.0.1:56308, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T21:46:54,450 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41445 2024-11-11T21:46:54,450 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41445 2024-11-11T21:46:54,451 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41445 2024-11-11T21:46:54,452 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41445 2024-11-11T21:46:54,452 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41445 2024-11-11T21:46:54,462 
DEBUG [M:0;22a1f13b90b0:44805 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;22a1f13b90b0:44805 2024-11-11T21:46:54,463 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/22a1f13b90b0,44805,1731361614394 2024-11-11T21:46:54,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41445-0x10030c106990001, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:46:54,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:46:54,464 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/22a1f13b90b0,44805,1731361614394 2024-11-11T21:46:54,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:54,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41445-0x10030c106990001, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T21:46:54,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41445-0x10030c106990001, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:54,465 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T21:46:54,466 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/22a1f13b90b0,44805,1731361614394 from backup master directory 2024-11-11T21:46:54,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/22a1f13b90b0,44805,1731361614394 2024-11-11T21:46:54,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41445-0x10030c106990001, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:46:54,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:46:54,466 WARN [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
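
The repeated "Set watcher on znode that does not yet exist" lines rely on a basic ZooKeeper property: an exists() call registers its watch even when the znode is absent, so the caller is notified with a NodeCreated event once the master later creates /hbase/master. The sketch below illustrates that primitive with the plain Apache ZooKeeper client; it is not the HBase ZKUtil/ZKWatcher code, and only the ensemble address and path are taken from the log.

    // Illustration of watching a znode that does not exist yet (plain ZooKeeper client API).
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("event " + event.getType() + " on " + event.getPath());
        // quorum/port as logged: 127.0.0.1:56308; the session timeout here is arbitrary
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56308", 30_000, watcher);
        // Returns null while /hbase/master is absent, but the watch stays registered,
        // so a NodeCreated event fires as soon as the active master writes the znode.
        if (zk.exists("/hbase/master", watcher) == null) {
          System.out.println("/hbase/master not present yet; waiting for NodeCreated");
        }
        Thread.sleep(30_000);   // keep the session alive long enough to observe the event
        zk.close();
      }
    }

This is why, in the log, the region server can set its /hbase/master watch before the master finishes registering, and still receive the NodeCreated event a few milliseconds later.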
2024-11-11T21:46:54,466 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=22a1f13b90b0,44805,1731361614394
2024-11-11T21:46:54,470 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/hbase.id] with ID: b62f6701-4f3d-4b7f-aee8-ca2518da825e
2024-11-11T21:46:54,470 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/.tmp/hbase.id
2024-11-11T21:46:54,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741826_1002 (size=42)
2024-11-11T21:46:54,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741826_1002 (size=42)
2024-11-11T21:46:54,475 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/.tmp/hbase.id]:[hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/hbase.id]
2024-11-11T21:46:54,486 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T21:46:54,486 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-11T21:46:54,488 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms.
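
The FSUtils(620/625/634) lines above follow a write-to-temp-then-rename pattern: the cluster ID is first written under .tmp/ and only then moved to hbase.id, so readers never observe a partially written file. The sketch below shows that pattern with the public Hadoop FileSystem API; it is an illustration, not the actual FSUtils implementation, and only the paths and NameNode port are taken from the log.

    // Illustration: write hbase.id to a temp path, then rename it into place (as logged above).
    import java.nio.charset.StandardCharsets;
    import java.util.UUID;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:41821");   // NameNode port from the log
        FileSystem fs = FileSystem.get(conf);

        Path rootDir = new Path("/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c");
        Path tmpId   = new Path(rootDir, ".tmp/hbase.id");
        Path finalId = new Path(rootDir, "hbase.id");

        // Write the cluster ID to the temporary location first.
        try (FSDataOutputStream out = fs.create(tmpId, true)) {
          out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
        }
        // Rename is the "move to its target location" step logged by FSUtils(634).
        if (!fs.rename(tmpId, finalId)) {
          throw new java.io.IOException("could not move " + tmpId + " to " + finalId);
        }
      }
    }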
2024-11-11T21:46:54,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:54,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41445-0x10030c106990001, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:54,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741827_1003 (size=196) 2024-11-11T21:46:54,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741827_1003 (size=196) 2024-11-11T21:46:54,499 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T21:46:54,499 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T21:46:54,500 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:46:54,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741828_1004 (size=1189) 2024-11-11T21:46:54,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741828_1004 (size=1189) 2024-11-11T21:46:54,509 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store 2024-11-11T21:46:54,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741829_1005 (size=34) 2024-11-11T21:46:54,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741829_1005 (size=34) 2024-11-11T21:46:54,515 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:46:54,515 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T21:46:54,515 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:46:54,515 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:46:54,515 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T21:46:54,515 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:46:54,515 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
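
The long descriptor dump above is the schema of the master's local "master:store" region, with the four column families 'info', 'proc', 'rs' and 'state'. For reference, the same schema can be expressed with the public HBase client API; the sketch below rebuilds only the 'info' family (VERSIONS=3, IN_MEMORY=true, BLOOMFILTER=ROWCOL, ROW_INDEX_V1 encoding, 8 KB blocks) and is purely illustrative, since the master constructs this descriptor internally.

    // Illustrative reconstruction of the logged master:store descriptor ('info' family only).
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))     // table 'master:store'
            .setColumnFamily(info)                                // 'proc', 'rs', 'state' omitted
            .build();
      }
    }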
2024-11-11T21:46:54,515 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731361614515Disabling compacts and flushes for region at 1731361614515Disabling writes for close at 1731361614515Writing region close event to WAL at 1731361614515Closed at 1731361614515 2024-11-11T21:46:54,516 WARN [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/.initializing 2024-11-11T21:46:54,516 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/WALs/22a1f13b90b0,44805,1731361614394 2024-11-11T21:46:54,518 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C44805%2C1731361614394, suffix=, logDir=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/WALs/22a1f13b90b0,44805,1731361614394, archiveDir=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/oldWALs, maxLogs=10 2024-11-11T21:46:54,519 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C44805%2C1731361614394.1731361614519 2024-11-11T21:46:54,523 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/WALs/22a1f13b90b0,44805,1731361614394/22a1f13b90b0%2C44805%2C1731361614394.1731361614519 2024-11-11T21:46:54,524 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37713:37713),(127.0.0.1/127.0.0.1:40917:40917)] 2024-11-11T21:46:54,524 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T21:46:54,524 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:46:54,525 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:54,525 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:54,526 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:54,527 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T21:46:54,527 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:54,528 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:46:54,528 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:54,529 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T21:46:54,529 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:54,529 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:46:54,529 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:54,530 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T21:46:54,530 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:54,531 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:46:54,531 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:54,532 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T21:46:54,532 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:54,532 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:46:54,532 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:54,533 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:54,533 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:54,535 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:54,535 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:54,536 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T21:46:54,538 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:46:54,540 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T21:46:54,541 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=873129, jitterRate=0.11024212837219238}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T21:46:54,542 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731361614525Initializing all the Stores at 1731361614525Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361614526 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361614526Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361614526Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361614526Cleaning up temporary data from old regions at 1731361614535 (+9 ms)Region opened successfully at 1731361614542 (+7 ms) 2024-11-11T21:46:54,542 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T21:46:54,546 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6726c4aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=22a1f13b90b0/172.17.0.2:0 2024-11-11T21:46:54,547 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T21:46:54,547 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T21:46:54,547 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T21:46:54,547 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T21:46:54,548 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T21:46:54,548 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T21:46:54,548 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T21:46:54,550 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T21:46:54,551 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T21:46:54,552 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T21:46:54,552 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T21:46:54,553 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T21:46:54,554 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T21:46:54,554 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T21:46:54,555 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T21:46:54,555 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T21:46:54,556 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T21:46:54,557 DEBUG 
[master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T21:46:54,558 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T21:46:54,559 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T21:46:54,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T21:46:54,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41445-0x10030c106990001, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T21:46:54,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41445-0x10030c106990001, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:54,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:54,560 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=22a1f13b90b0,44805,1731361614394, sessionid=0x10030c106990000, setting cluster-up flag (Was=false) 2024-11-11T21:46:54,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:54,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41445-0x10030c106990001, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:54,564 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T21:46:54,565 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=22a1f13b90b0,44805,1731361614394 2024-11-11T21:46:54,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:54,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41445-0x10030c106990001, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:54,570 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T21:46:54,571 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=22a1f13b90b0,44805,1731361614394 2024-11-11T21:46:54,572 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T21:46:54,574 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T21:46:54,574 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T21:46:54,574 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T21:46:54,575 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 22a1f13b90b0,44805,1731361614394 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T21:46:54,576 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:46:54,576 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:46:54,576 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:46:54,576 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:46:54,576 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/22a1f13b90b0:0, corePoolSize=10, maxPoolSize=10 2024-11-11T21:46:54,577 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:54,577 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=2, maxPoolSize=2 2024-11-11T21:46:54,577 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T21:46:54,577 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731361644577 2024-11-11T21:46:54,578 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T21:46:54,578 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T21:46:54,578 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T21:46:54,578 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T21:46:54,578 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T21:46:54,578 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T21:46:54,578 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:54,578 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T21:46:54,578 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T21:46:54,578 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T21:46:54,578 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:46:54,578 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T21:46:54,579 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T21:46:54,579 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T21:46:54,579 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361614579,5,FailOnTimeoutGroup] 2024-11-11T21:46:54,579 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361614579,5,FailOnTimeoutGroup] 2024-11-11T21:46:54,579 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:54,579 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T21:46:54,579 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:54,579 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:54,580 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:54,580 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T21:46:54,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741831_1007 (size=1321) 2024-11-11T21:46:54,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741831_1007 (size=1321) 2024-11-11T21:46:54,588 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T21:46:54,588 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c 2024-11-11T21:46:54,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741832_1008 (size=32) 2024-11-11T21:46:54,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741832_1008 (size=32) 2024-11-11T21:46:54,594 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:46:54,596 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T21:46:54,597 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T21:46:54,597 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:54,598 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:46:54,598 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T21:46:54,599 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T21:46:54,599 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:54,600 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:46:54,600 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T21:46:54,601 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T21:46:54,601 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:54,602 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:46:54,602 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T21:46:54,603 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T21:46:54,603 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:54,604 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:46:54,604 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T21:46:54,605 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740 2024-11-11T21:46:54,605 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740 2024-11-11T21:46:54,606 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T21:46:54,606 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T21:46:54,607 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
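The FlushLargeStoresPolicy lines above report that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, so the policy falls back to the region's memstore flush heap size divided by the number of families (32 MB for master:store, 16 MB for hbase:meta). A minimal sketch of how that property could be supplied on a descriptor follows; it assumes the string setValue overload of TableDescriptorBuilder and uses a hypothetical table name, and is shown only to make the fallback message concrete.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    // Per-table value for the property the policy looks up in the descriptor:
    // flush an individual large store once it reaches 16 MB, instead of using
    // memstoreFlushSize / #families as in the log above.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example"))               // hypothetical table
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  String.valueOf(16L * 1024 * 1024))
        .build();

    System.out.println(td);
  }
}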
2024-11-11T21:46:54,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T21:46:54,610 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T21:46:54,611 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=775390, jitterRate=-0.014040663838386536}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T21:46:54,611 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731361614595Initializing all the Stores at 1731361614595Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361614595Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361614595Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361614595Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361614595Cleaning up temporary data from old regions at 1731361614606 (+11 ms)Region opened successfully at 1731361614611 (+5 ms) 2024-11-11T21:46:54,611 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T21:46:54,612 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T21:46:54,612 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T21:46:54,612 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T21:46:54,612 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T21:46:54,612 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T21:46:54,612 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731361614611Disabling compacts and flushes for region at 1731361614611Disabling writes for close at 1731361614612 (+1 ms)Writing region 
close event to WAL at 1731361614612Closed at 1731361614612 2024-11-11T21:46:54,614 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:46:54,614 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T21:46:54,614 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T21:46:54,615 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T21:46:54,617 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T21:46:54,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:46:54,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:46:54,654 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.HRegionServer(746): ClusterId : b62f6701-4f3d-4b7f-aee8-ca2518da825e 2024-11-11T21:46:54,654 DEBUG [RS:0;22a1f13b90b0:41445 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T21:46:54,655 DEBUG [RS:0;22a1f13b90b0:41445 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T21:46:54,655 DEBUG [RS:0;22a1f13b90b0:41445 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T21:46:54,657 DEBUG [RS:0;22a1f13b90b0:41445 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T21:46:54,657 DEBUG [RS:0;22a1f13b90b0:41445 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ecc6c09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=22a1f13b90b0/172.17.0.2:0 2024-11-11T21:46:54,671 DEBUG [RS:0;22a1f13b90b0:41445 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;22a1f13b90b0:41445 2024-11-11T21:46:54,672 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T21:46:54,672 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T21:46:54,672 DEBUG [RS:0;22a1f13b90b0:41445 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-11T21:46:54,672 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.HRegionServer(2659): reportForDuty to master=22a1f13b90b0,44805,1731361614394 with port=41445, startcode=1731361614438 2024-11-11T21:46:54,673 DEBUG [RS:0;22a1f13b90b0:41445 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T21:46:54,674 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40623, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T21:46:54,675 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44805 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 22a1f13b90b0,41445,1731361614438 2024-11-11T21:46:54,675 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44805 {}] master.ServerManager(517): Registering regionserver=22a1f13b90b0,41445,1731361614438 2024-11-11T21:46:54,676 DEBUG [RS:0;22a1f13b90b0:41445 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c 2024-11-11T21:46:54,676 DEBUG [RS:0;22a1f13b90b0:41445 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41821 2024-11-11T21:46:54,676 DEBUG [RS:0;22a1f13b90b0:41445 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T21:46:54,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T21:46:54,678 DEBUG [RS:0;22a1f13b90b0:41445 {}] zookeeper.ZKUtil(111): regionserver:41445-0x10030c106990001, quorum=127.0.0.1:56308, baseZNode=/hbase Set watcher on 
existing znode=/hbase/rs/22a1f13b90b0,41445,1731361614438 2024-11-11T21:46:54,678 WARN [RS:0;22a1f13b90b0:41445 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T21:46:54,678 INFO [RS:0;22a1f13b90b0:41445 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:46:54,678 DEBUG [RS:0;22a1f13b90b0:41445 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/WALs/22a1f13b90b0,41445,1731361614438 2024-11-11T21:46:54,679 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [22a1f13b90b0,41445,1731361614438] 2024-11-11T21:46:54,681 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T21:46:54,683 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T21:46:54,683 INFO [RS:0;22a1f13b90b0:41445 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T21:46:54,683 INFO [RS:0;22a1f13b90b0:41445 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:54,683 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T21:46:54,684 INFO [RS:0;22a1f13b90b0:41445 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T21:46:54,684 INFO [RS:0;22a1f13b90b0:41445 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
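The reportForDuty / "Registering regionserver=22a1f13b90b0,41445,1731361614438" exchange above, plus the ephemeral znode created under /hbase/rs, is what later shows up to clients as a live server in cluster status. As a hedged, purely illustrative check (not part of this test; the ZooKeeper quorum value is an assumption), a client could confirm the registration through the Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class LiveServersSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed quorum for illustration; the test above runs one at 127.0.0.1:56308.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      System.out.println("Active master: " + metrics.getMasterName());
      for (ServerName rs : metrics.getLiveServerMetrics().keySet()) {
        // Each entry corresponds to a registered region server such as
        // 22a1f13b90b0,41445,1731361614438 in the log above.
        System.out.println("Live region server: " + rs);
      }
    }
  }
}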
2024-11-11T21:46:54,684 DEBUG [RS:0;22a1f13b90b0:41445 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:54,684 DEBUG [RS:0;22a1f13b90b0:41445 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:54,684 DEBUG [RS:0;22a1f13b90b0:41445 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:54,684 DEBUG [RS:0;22a1f13b90b0:41445 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:54,684 DEBUG [RS:0;22a1f13b90b0:41445 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:54,684 DEBUG [RS:0;22a1f13b90b0:41445 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/22a1f13b90b0:0, corePoolSize=2, maxPoolSize=2 2024-11-11T21:46:54,684 DEBUG [RS:0;22a1f13b90b0:41445 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:54,684 DEBUG [RS:0;22a1f13b90b0:41445 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:54,684 DEBUG [RS:0;22a1f13b90b0:41445 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:54,684 DEBUG [RS:0;22a1f13b90b0:41445 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:54,684 DEBUG [RS:0;22a1f13b90b0:41445 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:54,684 DEBUG [RS:0;22a1f13b90b0:41445 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:46:54,685 DEBUG [RS:0;22a1f13b90b0:41445 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/22a1f13b90b0:0, corePoolSize=3, maxPoolSize=3 2024-11-11T21:46:54,685 DEBUG [RS:0;22a1f13b90b0:41445 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0, corePoolSize=3, maxPoolSize=3 2024-11-11T21:46:54,686 INFO [RS:0;22a1f13b90b0:41445 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:54,686 INFO [RS:0;22a1f13b90b0:41445 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:54,686 INFO [RS:0;22a1f13b90b0:41445 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:54,686 INFO [RS:0;22a1f13b90b0:41445 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
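The repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." lines (CompactionChecker, MemstoreFlusherChore, ExecutorStatusChore, nonceCleaner, and more below) come from periodic tasks scheduled on HBase's internal ChoreService. The following is only a rough sketch under the assumption that the internal ScheduledChore/ChoreService classes keep their usual shape (name/stopper/period constructor, abstract chore()); the chore name and body are made up for illustration.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws Exception {
    // Minimal Stoppable so the chore can be cancelled; in the log above the
    // region server itself plays this role for its chores.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // A periodic task comparable to the 1000 ms CompactionChecker /
    // MemstoreFlusherChore entries (illustrative name and body).
    ScheduledChore exampleChore = new ScheduledChore("ExampleChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };

    ChoreService choreService = new ChoreService("example");
    choreService.scheduleChore(exampleChore);  // surfaces as "... is enabled." in the log

    Thread.sleep(3000);
    choreService.shutdown();
  }
}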
2024-11-11T21:46:54,686 INFO [RS:0;22a1f13b90b0:41445 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:54,686 INFO [RS:0;22a1f13b90b0:41445 {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,41445,1731361614438-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T21:46:54,700 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T21:46:54,700 INFO [RS:0;22a1f13b90b0:41445 {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,41445,1731361614438-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:54,700 INFO [RS:0;22a1f13b90b0:41445 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:54,700 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.Replication(171): 22a1f13b90b0,41445,1731361614438 started 2024-11-11T21:46:54,713 INFO [RS:0;22a1f13b90b0:41445 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:54,713 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.HRegionServer(1482): Serving as 22a1f13b90b0,41445,1731361614438, RpcServer on 22a1f13b90b0/172.17.0.2:41445, sessionid=0x10030c106990001 2024-11-11T21:46:54,714 DEBUG [RS:0;22a1f13b90b0:41445 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T21:46:54,714 DEBUG [RS:0;22a1f13b90b0:41445 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 22a1f13b90b0,41445,1731361614438 2024-11-11T21:46:54,714 DEBUG [RS:0;22a1f13b90b0:41445 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '22a1f13b90b0,41445,1731361614438' 2024-11-11T21:46:54,714 DEBUG [RS:0;22a1f13b90b0:41445 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T21:46:54,714 DEBUG [RS:0;22a1f13b90b0:41445 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T21:46:54,715 DEBUG [RS:0;22a1f13b90b0:41445 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T21:46:54,715 DEBUG [RS:0;22a1f13b90b0:41445 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T21:46:54,715 DEBUG [RS:0;22a1f13b90b0:41445 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 22a1f13b90b0,41445,1731361614438 2024-11-11T21:46:54,715 DEBUG [RS:0;22a1f13b90b0:41445 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '22a1f13b90b0,41445,1731361614438' 2024-11-11T21:46:54,715 DEBUG [RS:0;22a1f13b90b0:41445 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T21:46:54,715 DEBUG [RS:0;22a1f13b90b0:41445 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T21:46:54,716 DEBUG [RS:0;22a1f13b90b0:41445 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T21:46:54,716 INFO [RS:0;22a1f13b90b0:41445 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T21:46:54,716 INFO [RS:0;22a1f13b90b0:41445 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-11T21:46:54,767 WARN [22a1f13b90b0:44805 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-11T21:46:54,804 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:46:54,818 INFO [RS:0;22a1f13b90b0:41445 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C41445%2C1731361614438, suffix=, logDir=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/WALs/22a1f13b90b0,41445,1731361614438, archiveDir=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/oldWALs, maxLogs=32 2024-11-11T21:46:54,818 INFO [RS:0;22a1f13b90b0:41445 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C41445%2C1731361614438.1731361614818 2024-11-11T21:46:54,824 INFO [RS:0;22a1f13b90b0:41445 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/WALs/22a1f13b90b0,41445,1731361614438/22a1f13b90b0%2C41445%2C1731361614438.1731361614818 2024-11-11T21:46:54,825 DEBUG [RS:0;22a1f13b90b0:41445 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37713:37713),(127.0.0.1/127.0.0.1:40917:40917)] 2024-11-11T21:46:55,017 DEBUG [22a1f13b90b0:44805 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T21:46:55,018 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=22a1f13b90b0,41445,1731361614438 2024-11-11T21:46:55,020 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 22a1f13b90b0,41445,1731361614438, state=OPENING 2024-11-11T21:46:55,021 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T21:46:55,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41445-0x10030c106990001, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:55,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:46:55,023 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T21:46:55,023 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:46:55,023 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:46:55,023 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=22a1f13b90b0,41445,1731361614438}] 2024-11-11T21:46:55,179 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T21:46:55,184 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54709, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T21:46:55,190 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, 
pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T21:46:55,190 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:46:55,193 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C41445%2C1731361614438.meta, suffix=.meta, logDir=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/WALs/22a1f13b90b0,41445,1731361614438, archiveDir=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/oldWALs, maxLogs=32 2024-11-11T21:46:55,194 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C41445%2C1731361614438.meta.1731361615193.meta 2024-11-11T21:46:55,200 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/WALs/22a1f13b90b0,41445,1731361614438/22a1f13b90b0%2C41445%2C1731361614438.meta.1731361615193.meta 2024-11-11T21:46:55,202 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37713:37713),(127.0.0.1/127.0.0.1:40917:40917)] 2024-11-11T21:46:55,203 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T21:46:55,203 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T21:46:55,203 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T21:46:55,203 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
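The two "WAL configuration" entries above report blocksize=256 MB, rollsize=128 MB and maxLogs=32 for the region server's default and meta WALs. As a hedged reference (the key names and defaults below are assumptions that happen to match the logged numbers, and the exact derivation inside AbstractFSWAL can differ between versions), those values usually come from the standard region-server settings:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative sketch: read the WAL-rolling keys and reproduce the numbers
    // logged above (256 MB block size, 128 MB roll size, 32 retained WAL files).
    public class WalRollSettingsSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Block size of the WAL file on HDFS; 256 MB in the entries above.
        long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // Roll once the WAL reaches blockSize * multiplier (0.5 -> 128 MB).
        double multiplier = conf.getDouble("hbase.regionserver.logroll.multiplier", 0.5);
        long rollSize = (long) (blockSize * multiplier);
        // Cap on retained WAL files before flushes are forced; 32 in the entries above.
        int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
        System.out.printf("blocksize=%d rollsize=%d maxLogs=%d%n", blockSize, rollSize, maxLogs);
      }
    }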
2024-11-11T21:46:55,203 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T21:46:55,203 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:46:55,203 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T21:46:55,203 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T21:46:55,205 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T21:46:55,206 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T21:46:55,206 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:55,206 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:46:55,206 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T21:46:55,207 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T21:46:55,207 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:55,207 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:46:55,207 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T21:46:55,208 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T21:46:55,208 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:55,208 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:46:55,208 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T21:46:55,209 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T21:46:55,209 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:55,209 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
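Each of the meta column families (info, ns, rep_barrier, table) is opened above with the same CompactionConfiguration line: minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, off-peak ratio 5.000000, major period 604800000. These correspond to the usual store-compaction settings; the key names and defaults in the sketch below are assumptions chosen to match the logged values, not read from this test's configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative mapping from the logged compaction parameters to config keys.
    public class CompactionSettingsSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);                      // minFilesToCompact
        int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);                     // maxFilesToCompact
        float ratio = conf.getFloat("hbase.hstore.compaction.ratio", 1.2f);                // ratio
        float offPeakRatio = conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
        long majorPeriodMs = conf.getLong("hbase.hregion.majorcompaction", 604800000L);    // 7 days in ms
        System.out.printf("min=%d max=%d ratio=%.1f offPeak=%.1f majorPeriodMs=%d%n",
            minFiles, maxFiles, ratio, offPeakRatio, majorPeriodMs);
      }
    }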
2024-11-11T21:46:55,210 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T21:46:55,210 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740 2024-11-11T21:46:55,211 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740 2024-11-11T21:46:55,212 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T21:46:55,212 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T21:46:55,213 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T21:46:55,214 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T21:46:55,215 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=797944, jitterRate=0.014638707041740417}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T21:46:55,215 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T21:46:55,215 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731361615204Writing region info on filesystem at 1731361615204Initializing all the Stores at 1731361615204Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361615204Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361615205 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361615205Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361615205Cleaning up temporary data from old regions at 1731361615212 (+7 ms)Running coprocessor post-open hooks at 1731361615215 (+3 ms)Region opened successfully at 1731361615215 2024-11-11T21:46:55,216 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731361615178 2024-11-11T21:46:55,219 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T21:46:55,219 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T21:46:55,220 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=22a1f13b90b0,41445,1731361614438 2024-11-11T21:46:55,222 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 22a1f13b90b0,41445,1731361614438, state=OPEN 2024-11-11T21:46:55,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41445-0x10030c106990001, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T21:46:55,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T21:46:55,224 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=22a1f13b90b0,41445,1731361614438 2024-11-11T21:46:55,224 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:46:55,224 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:46:55,227 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T21:46:55,227 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=22a1f13b90b0,41445,1731361614438 in 201 msec 2024-11-11T21:46:55,230 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T21:46:55,230 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 613 msec 2024-11-11T21:46:55,231 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:46:55,231 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T21:46:55,232 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T21:46:55,233 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=22a1f13b90b0,41445,1731361614438, seqNum=-1] 2024-11-11T21:46:55,233 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T21:46:55,234 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34657, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T21:46:55,240 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 665 msec 2024-11-11T21:46:55,241 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731361615241, completionTime=-1 2024-11-11T21:46:55,241 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T21:46:55,241 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-11T21:46:55,242 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T21:46:55,242 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731361675242 2024-11-11T21:46:55,243 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731361735243 2024-11-11T21:46:55,243 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-11T21:46:55,243 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,44805,1731361614394-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:55,243 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,44805,1731361614394-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:55,243 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,44805,1731361614394-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:55,243 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-22a1f13b90b0:44805, period=300000, unit=MILLISECONDS is enabled. 
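InitMetaProcedure above creates the 'default' and 'hbase' namespaces inside the master as part of cluster bootstrap. Purely for orientation, the client-facing equivalent of creating a namespace (not what the procedure itself executes) is a single Admin call; the namespace name below is just an example:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Illustrative only: client-side namespace creation via the Admin API.
    public class CreateNamespaceSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
        }
      }
    }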
2024-11-11T21:46:55,243 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:55,243 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T21:46:55,245 DEBUG [master/22a1f13b90b0:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T21:46:55,247 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.781sec 2024-11-11T21:46:55,247 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T21:46:55,247 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T21:46:55,247 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T21:46:55,247 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T21:46:55,247 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T21:46:55,247 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,44805,1731361614394-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T21:46:55,247 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,44805,1731361614394-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T21:46:55,249 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T21:46:55,249 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T21:46:55,249 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,44805,1731361614394-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T21:46:55,254 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10c2896a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:46:55,254 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 22a1f13b90b0,44805,-1 for getting cluster id 2024-11-11T21:46:55,254 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T21:46:55,256 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b62f6701-4f3d-4b7f-aee8-ca2518da825e' 2024-11-11T21:46:55,256 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T21:46:55,257 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b62f6701-4f3d-4b7f-aee8-ca2518da825e" 2024-11-11T21:46:55,257 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bd27b30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:46:55,257 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [22a1f13b90b0,44805,-1] 2024-11-11T21:46:55,257 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T21:46:55,257 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:46:55,258 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44394, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T21:46:55,260 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b7fc8f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:46:55,260 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T21:46:55,261 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=22a1f13b90b0,41445,1731361614438, seqNum=-1] 2024-11-11T21:46:55,261 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T21:46:55,263 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48690, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T21:46:55,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=22a1f13b90b0,44805,1731361614394 2024-11-11T21:46:55,264 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:46:55,268 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T21:46:55,269 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T21:46:55,270 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 22a1f13b90b0,44805,1731361614394 2024-11-11T21:46:55,270 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4000e422 2024-11-11T21:46:55,270 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T21:46:55,271 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44404, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T21:46:55,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-11T21:46:55,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-11T21:46:55,272 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T21:46:55,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T21:46:55,275 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T21:46:55,275 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:55,275 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-11T21:46:55,276 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T21:46:55,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T21:46:55,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741835_1011 (size=405) 2024-11-11T21:46:55,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741835_1011 (size=405) 2024-11-11T21:46:55,284 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0129225d5ce7cbf275a23daff9954cc7, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c 2024-11-11T21:46:55,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741836_1012 (size=88) 2024-11-11T21:46:55,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741836_1012 (size=88) 2024-11-11T21:46:55,289 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:46:55,290 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 0129225d5ce7cbf275a23daff9954cc7, disabling compactions & flushes 2024-11-11T21:46:55,290 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7. 2024-11-11T21:46:55,290 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7. 2024-11-11T21:46:55,290 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7. after waiting 0 ms 2024-11-11T21:46:55,290 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7. 
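The entries above show the test client connecting to the minicluster, turning the balancer off ("set balanceSwitch=false"), and asking the master to create 'TestLogRolling-testCompactionRecordDoesntBlockRolling' with a single 'info' family and intentionally tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) values, which is what the TableDescriptorChecker warnings refer to. A minimal client-side sketch of an equivalent request, assuming the standard HBase Admin API rather than the test's own helper code, would be:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative equivalent of the createTable request logged above.
    public class CreateTestTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.balancerSwitch(false, true);  // matches "set balanceSwitch=false" above
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
              .setMaxFileSize(786432)         // tiny on purpose; triggers the MAX_FILESIZE warning
              .setMemStoreFlushSize(8192)     // tiny on purpose; triggers the MEMSTORE_FLUSHSIZE warning
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)          // VERSIONS => '1' in the descriptor above
                  .build())
              .build());
        }
      }
    }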
2024-11-11T21:46:55,290 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7. 2024-11-11T21:46:55,290 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0129225d5ce7cbf275a23daff9954cc7: Waiting for close lock at 1731361615290Disabling compacts and flushes for region at 1731361615290Disabling writes for close at 1731361615290Writing region close event to WAL at 1731361615290Closed at 1731361615290 2024-11-11T21:46:55,292 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T21:46:55,292 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731361615292"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731361615292"}]},"ts":"1731361615292"} 2024-11-11T21:46:55,295 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-11T21:46:55,296 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T21:46:55,297 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731361615296"}]},"ts":"1731361615296"} 2024-11-11T21:46:55,299 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-11T21:46:55,299 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=0129225d5ce7cbf275a23daff9954cc7, ASSIGN}] 2024-11-11T21:46:55,301 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=0129225d5ce7cbf275a23daff9954cc7, ASSIGN 2024-11-11T21:46:55,302 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=0129225d5ce7cbf275a23daff9954cc7, ASSIGN; state=OFFLINE, location=22a1f13b90b0,41445,1731361614438; forceNewPlan=false, retain=false 2024-11-11T21:46:55,453 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0129225d5ce7cbf275a23daff9954cc7, regionState=OPENING, regionLocation=22a1f13b90b0,41445,1731361614438 2024-11-11T21:46:55,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=0129225d5ce7cbf275a23daff9954cc7, ASSIGN because future has completed 2024-11-11T21:46:55,458 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0129225d5ce7cbf275a23daff9954cc7, server=22a1f13b90b0,41445,1731361614438}] 2024-11-11T21:46:55,625 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7. 2024-11-11T21:46:55,625 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0129225d5ce7cbf275a23daff9954cc7, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7.', STARTKEY => '', ENDKEY => ''} 2024-11-11T21:46:55,626 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 0129225d5ce7cbf275a23daff9954cc7 2024-11-11T21:46:55,626 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:46:55,627 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0129225d5ce7cbf275a23daff9954cc7 2024-11-11T21:46:55,627 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0129225d5ce7cbf275a23daff9954cc7 2024-11-11T21:46:55,630 INFO [StoreOpener-0129225d5ce7cbf275a23daff9954cc7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0129225d5ce7cbf275a23daff9954cc7 2024-11-11T21:46:55,632 INFO [StoreOpener-0129225d5ce7cbf275a23daff9954cc7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0129225d5ce7cbf275a23daff9954cc7 columnFamilyName info 2024-11-11T21:46:55,632 DEBUG [StoreOpener-0129225d5ce7cbf275a23daff9954cc7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:46:55,633 INFO [StoreOpener-0129225d5ce7cbf275a23daff9954cc7-1 {}] regionserver.HStore(327): Store=0129225d5ce7cbf275a23daff9954cc7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:46:55,633 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0129225d5ce7cbf275a23daff9954cc7 2024-11-11T21:46:55,634 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7 2024-11-11T21:46:55,634 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7 2024-11-11T21:46:55,635 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0129225d5ce7cbf275a23daff9954cc7 2024-11-11T21:46:55,635 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0129225d5ce7cbf275a23daff9954cc7 2024-11-11T21:46:55,638 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0129225d5ce7cbf275a23daff9954cc7 2024-11-11T21:46:55,641 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T21:46:55,641 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0129225d5ce7cbf275a23daff9954cc7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=870587, jitterRate=0.10700888931751251}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T21:46:55,641 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0129225d5ce7cbf275a23daff9954cc7 2024-11-11T21:46:55,642 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0129225d5ce7cbf275a23daff9954cc7: Running coprocessor pre-open hook at 1731361615627Writing region info on filesystem at 1731361615627Initializing all the Stores at 1731361615629 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361615629Cleaning up temporary data from old regions at 1731361615635 (+6 ms)Running coprocessor post-open hooks at 1731361615641 (+6 ms)Region opened successfully at 1731361615642 (+1 ms) 2024-11-11T21:46:55,643 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7., pid=6, masterSystemTime=1731361615614 2024-11-11T21:46:55,646 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7. 2024-11-11T21:46:55,646 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7. 2024-11-11T21:46:55,648 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0129225d5ce7cbf275a23daff9954cc7, regionState=OPEN, openSeqNum=2, regionLocation=22a1f13b90b0,41445,1731361614438 2024-11-11T21:46:55,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:55,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:46:55,650 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0129225d5ce7cbf275a23daff9954cc7, server=22a1f13b90b0,41445,1731361614438 because future has completed 2024-11-11T21:46:55,655 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T21:46:55,655 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0129225d5ce7cbf275a23daff9954cc7, server=22a1f13b90b0,41445,1731361614438 in 194 msec 2024-11-11T21:46:55,658 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T21:46:55,658 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=0129225d5ce7cbf275a23daff9954cc7, ASSIGN in 356 msec 2024-11-11T21:46:55,659 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T21:46:55,659 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731361615659"}]},"ts":"1731361615659"} 2024-11-11T21:46:55,661 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-11T21:46:55,662 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T21:46:55,664 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 390 msec 2024-11-11T21:46:55,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:56,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:46:56,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:56,806 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:56,968 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T21:46:56,969 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:56,970 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:56,970 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:56,970 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:56,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:56,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:56,991 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:56,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:56,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:56,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:56,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:56,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:56,996 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:56,996 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:56,996 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:56,999 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:46:57,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:46:57,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:57,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:58,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:46:58,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:58,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:59,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:46:59,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:46:59,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:00,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:00,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:00,682 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T21:47:00,682 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-11T21:47:00,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:01,463 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T21:47:01,464 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-11T21:47:01,465 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T21:47:01,465 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-11T21:47:01,466 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-11T21:47:01,466 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-11T21:47:01,467 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T21:47:01,467 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-11T21:47:01,656 WARN 
[Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:01,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:01,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:02,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:02,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:02,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:03,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:03,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:03,812 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:04,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:04,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:04,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:05,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T21:47:05,354 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-11T21:47:05,354 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-11T21:47:05,361 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T21:47:05,361 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7. 2024-11-11T21:47:05,365 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7., hostname=22a1f13b90b0,41445,1731361614438, seqNum=2] 2024-11-11T21:47:05,372 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T21:47:05,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T21:47:05,377 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-11T21:47:05,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-11T21:47:05,378 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T21:47:05,380 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T21:47:05,548 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41445 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-11T21:47:05,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7. 
2024-11-11T21:47:05,549 INFO [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 0129225d5ce7cbf275a23daff9954cc7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-11T21:47:05,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/.tmp/info/0419006cb073424aa5e00099befc1ce6 is 1080, key is row0001/info:/1731361625366/Put/seqid=0 2024-11-11T21:47:05,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741837_1013 (size=6033) 2024-11-11T21:47:05,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741837_1013 (size=6033) 2024-11-11T21:47:05,571 INFO [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/.tmp/info/0419006cb073424aa5e00099befc1ce6 2024-11-11T21:47:05,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/.tmp/info/0419006cb073424aa5e00099befc1ce6 as hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/0419006cb073424aa5e00099befc1ce6 2024-11-11T21:47:05,583 INFO [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/0419006cb073424aa5e00099befc1ce6, entries=1, sequenceid=5, filesize=5.9 K 2024-11-11T21:47:05,584 INFO [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 0129225d5ce7cbf275a23daff9954cc7 in 34ms, sequenceid=5, compaction requested=false 2024-11-11T21:47:05,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 0129225d5ce7cbf275a23daff9954cc7: 2024-11-11T21:47:05,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7. 
2024-11-11T21:47:05,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-11T21:47:05,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-11T21:47:05,590 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-11T21:47:05,590 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 208 msec 2024-11-11T21:47:05,593 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 218 msec 2024-11-11T21:47:05,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:05,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:05,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:06,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:06,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:06,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:07,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:07,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:07,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:08,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:08,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:08,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:08,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 after 68073ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
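The WARN records above and below all come out of the same retry loop: AbstractFSWAL.closeWriter asks RecoverLeaseFSUtils to recover the lease on a WAL file, RecoverLeaseFSUtils invokes DistributedFileSystem.recoverLease/isFileClosed reflectively, and every invocation fails because the test has already shut down the DFSClient (DFSClient.checkOpen throws "Filesystem closed"). The sketch below is illustrative only, not the RecoverLeaseFSUtils implementation: it assumes an HDFS path and calls the same two public DistributedFileSystem methods directly rather than through reflection, just to show the shape of such a retry loop; the class name, attempt count, and sleep interval are arbitrary.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Hypothetical helper (not HBase code): returns true once the NameNode reports the
      // file closed, false if we give up after maxAttempts.
      public static boolean recoverLease(Configuration conf, Path walFile, int maxAttempts)
          throws Exception {
        FileSystem fs = walFile.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          return true; // nothing to recover on non-HDFS filesystems
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
          // Both calls go through DFSClient.checkOpen first; if the client has already been
          // closed they throw IOException("Filesystem closed"), the failure logged above.
          if (dfs.recoverLease(walFile) || dfs.isFileClosed(walFile)) {
            return true;
          }
          Thread.sleep(1000L); // arbitrary back-off between attempts
        }
        return false;
      }
    }

Once the client is closed, both calls fail in checkOpen before ever reaching the NameNode, so no number of retries can succeed; the loop only ends when the close path gives up, which is why the identical trace keeps repeating at roughly one-second intervals in the records that follow.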
2024-11-11T21:47:09,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:09,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:09,819 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:10,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:10,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:10,820 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:11,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:11,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:11,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:12,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:12,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:12,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:13,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:13,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:13,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:14,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:14,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:14,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:15,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-11T21:47:15,424 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-11T21:47:15,433 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T21:47:15,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T21:47:15,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-11T21:47:15,438 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-11T21:47:15,440 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T21:47:15,440 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T21:47:15,596 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41445 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-11T21:47:15,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7. 
2024-11-11T21:47:15,597 INFO [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 0129225d5ce7cbf275a23daff9954cc7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-11T21:47:15,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/.tmp/info/c75616180605412886d585032b63468d is 1080, key is row0002/info:/1731361635428/Put/seqid=0 2024-11-11T21:47:15,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741838_1014 (size=6033) 2024-11-11T21:47:15,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741838_1014 (size=6033) 2024-11-11T21:47:15,615 INFO [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/.tmp/info/c75616180605412886d585032b63468d 2024-11-11T21:47:15,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/.tmp/info/c75616180605412886d585032b63468d as hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/c75616180605412886d585032b63468d 2024-11-11T21:47:15,628 INFO [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/c75616180605412886d585032b63468d, entries=1, sequenceid=9, filesize=5.9 K 2024-11-11T21:47:15,629 INFO [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 0129225d5ce7cbf275a23daff9954cc7 in 32ms, sequenceid=9, compaction requested=false 2024-11-11T21:47:15,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 0129225d5ce7cbf275a23daff9954cc7: 2024-11-11T21:47:15,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7. 
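For reference, the flush recorded above (pid=9 delegating to FlushRegionProcedure pid=10) is the kind of operation a test client can trigger through the standard HBase Admin API. The following is a minimal, illustrative sketch only; the class name and the hard-coded table name are assumptions for illustration, not taken from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush the table's memstores; on the server side this
      // shows up as a FlushTableProcedure with per-region FlushRegionProcedure
      // subprocedures, like the pid=9/pid=10 entries logged above.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}
```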
2024-11-11T21:47:15,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-11T21:47:15,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-11T21:47:15,634 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-11T21:47:15,634 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 191 msec 2024-11-11T21:47:15,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 201 msec 2024-11-11T21:47:15,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:15,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:15,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:16,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:16,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:16,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta after 68080ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T21:47:16,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 after 68091ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
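The repeated InvocationTargetException / "Filesystem closed" warnings above come from reflective calls into DistributedFileSystem (isFileClosed, recoverLease) made after the backing DFSClient has already been shut down, so the wrapped IOException surfaces as the cause of the reflection exception. Below is a minimal sketch of that reflective-probe pattern under those assumptions; it is not the actual RecoverLeaseFSUtils code, and the helper name and fallback behaviour are made up for illustration.

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class LeaseProbe {
  /**
   * Probe whether an HDFS file is closed, invoking isFileClosed reflectively so the
   * caller compiles against FileSystem implementations that do not expose the method.
   * When the underlying DFSClient is already closed, the IOException("Filesystem closed")
   * arrives here as the cause of an InvocationTargetException, as in the warnings above.
   */
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // method not available on this FileSystem implementation
    } catch (InvocationTargetException e) {
      // e.getCause() is typically java.io.IOException: Filesystem closed
      return false;
    }
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    System.out.println(isFileClosed(fs, new Path(args[0])));
  }
}
```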
2024-11-11T21:47:16,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:17,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:17,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:17,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:18,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:18,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:18,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:19,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:19,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:19,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:20,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:20,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:20,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:21,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:21,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:21,833 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:22,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:22,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:22,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:23,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:23,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:23,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:24,377 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T21:47:24,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:24,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:24,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:25,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-11T21:47:25,472 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-11T21:47:25,475 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C41445%2C1731361614438.1731361645475 2024-11-11T21:47:25,481 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:25,481 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:25,481 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:25,481 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:25,481 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:25,481 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/WALs/22a1f13b90b0,41445,1731361614438/22a1f13b90b0%2C41445%2C1731361614438.1731361614818 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/WALs/22a1f13b90b0,41445,1731361614438/22a1f13b90b0%2C41445%2C1731361614438.1731361645475 2024-11-11T21:47:25,482 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40917:40917),(127.0.0.1/127.0.0.1:37713:37713)] 2024-11-11T21:47:25,482 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/WALs/22a1f13b90b0,41445,1731361614438/22a1f13b90b0%2C41445%2C1731361614438.1731361614818 is not closed yet, will try archiving it next time 2024-11-11T21:47:25,483 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T21:47:25,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741833_1009 (size=5546) 2024-11-11T21:47:25,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741833_1009 (size=5546) 2024-11-11T21:47:25,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T21:47:25,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-11T21:47:25,486 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-11T21:47:25,487 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 
2024-11-11T21:47:25,487 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-11T21:47:25,641 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41445 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-11T21:47:25,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7.
2024-11-11T21:47:25,642 INFO [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 0129225d5ce7cbf275a23daff9954cc7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-11T21:47:25,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/.tmp/info/51590b2ff59b45b2b8d53470d9aa56d2 is 1080, key is row0003/info:/1731361645474/Put/seqid=0
2024-11-11T21:47:25,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741840_1016 (size=6033)
2024-11-11T21:47:25,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741840_1016 (size=6033)
2024-11-11T21:47:25,659 INFO [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/.tmp/info/51590b2ff59b45b2b8d53470d9aa56d2
2024-11-11T21:47:25,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/.tmp/info/51590b2ff59b45b2b8d53470d9aa56d2 as hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/51590b2ff59b45b2b8d53470d9aa56d2
2024-11-11T21:47:25,674 INFO [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/51590b2ff59b45b2b8d53470d9aa56d2, entries=1, sequenceid=13, filesize=5.9 K
2024-11-11T21:47:25,675 INFO [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 0129225d5ce7cbf275a23daff9954cc7 in 33ms, sequenceid=13, compaction requested=true
2024-11-11T21:47:25,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 0129225d5ce7cbf275a23daff9954cc7:
2024-11-11T21:47:25,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7.
2024-11-11T21:47:25,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-11T21:47:25,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-11T21:47:25,680 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-11T21:47:25,680 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 190 msec
2024-11-11T21:47:25,682 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 197 msec
2024-11-11T21:47:25,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-11T21:47:25,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta
2024-11-11T21:47:25,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659
2024-11-11T21:47:26,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta
2024-11-11T21:47:26,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725
2024-11-11T21:47:26,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659
2024-11-11T21:47:27,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725
2024-11-11T21:47:27,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta
2024-11-11T21:47:27,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659
2024-11-11T21:47:28,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725
2024-11-11T21:47:28,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta
2024-11-11T21:47:28,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659
2024-11-11T21:47:29,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725
2024-11-11T21:47:29,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta
2024-11-11T21:47:29,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659
2024-11-11T21:47:30,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta
2024-11-11T21:47:30,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725
2024-11-11T21:47:30,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659
2024-11-11T21:47:31,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta
2024-11-11T21:47:31,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725
2024-11-11T21:47:31,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659
2024-11-11T21:47:32,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta
2024-11-11T21:47:32,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725
2024-11-11T21:47:32,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659
2024-11-11T21:47:33,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta
2024-11-11T21:47:33,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725
2024-11-11T21:47:33,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:34,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:34,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:34,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
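The InvocationTargetException above comes from a reflective call: RecoverLeaseFSUtils probes the filesystem's isFileClosed(Path) method via reflection, so the real cause ("Filesystem closed") arrives wrapped. The following is a minimal, self-contained sketch of that reflection pattern, not the HBase implementation itself; the class name is illustrative and only the public Hadoop FileSystem and Path types are assumed.

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class IsFileClosedProbe {
      // Reflectively call isFileClosed(Path) if the FileSystem implementation provides it
      // (DistributedFileSystem does). Returns null when the method is absent; rethrows the
      // real cause when the reflective call fails.
      static Boolean isFileClosed(FileSystem fs, Path path) throws IOException {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException | IllegalAccessException e) {
          return null; // implementation does not expose isFileClosed, or it is inaccessible
        } catch (InvocationTargetException e) {
          // The wrapped cause is what the log's "Caused by:" line shows, e.g. "Filesystem closed".
          Throwable cause = e.getCause();
          if (cause instanceof IOException) {
            throw (IOException) cause;
          }
          throw new IOException(cause);
        }
      }
    }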
2024-11-11T21:47:35,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-11T21:47:35,504 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-11T21:47:35,504 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-11T21:47:35,508 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-11T21:47:35,508 DEBUG [Time-limited test {}] regionserver.HStore(1541): 0129225d5ce7cbf275a23daff9954cc7/info is initiating minor compaction (all files)
2024-11-11T21:47:35,508 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-11T21:47:35,509 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-11T21:47:35,509 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 0129225d5ce7cbf275a23daff9954cc7/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7.
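The compaction above is selected server side (SortedCompactionPolicy / ExploringCompactionPolicy). For reference, a client can request a compaction of the same table and column family through the public HBase Admin API; this is a hedged sketch, assuming an hbase-site.xml on the classpath that points at the cluster, with the table and family names taken from the log and the class name illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class RequestCompaction {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
          // Ask the region server(s) to compact the 'info' family; the actual file
          // selection (e.g. ExploringCompactionPolicy) still happens server side.
          admin.compact(table, Bytes.toBytes("info"));
        }
      }
    }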
2024-11-11T21:47:35,509 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/0419006cb073424aa5e00099befc1ce6, hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/c75616180605412886d585032b63468d, hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/51590b2ff59b45b2b8d53470d9aa56d2] into tmpdir=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/.tmp, totalSize=17.7 K
2024-11-11T21:47:35,510 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 0419006cb073424aa5e00099befc1ce6, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731361625366
2024-11-11T21:47:35,511 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting c75616180605412886d585032b63468d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731361635428
2024-11-11T21:47:35,512 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 51590b2ff59b45b2b8d53470d9aa56d2, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731361645474
2024-11-11T21:47:35,527 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 0129225d5ce7cbf275a23daff9954cc7#info#compaction#44 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-11T21:47:35,528 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/.tmp/info/0c54b7f406f64fa292257281c55e01ca is 1080, key is row0001/info:/1731361625366/Put/seqid=0
2024-11-11T21:47:35,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741841_1017 (size=8296)
2024-11-11T21:47:35,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741841_1017 (size=8296)
2024-11-11T21:47:35,540 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/.tmp/info/0c54b7f406f64fa292257281c55e01ca as hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/0c54b7f406f64fa292257281c55e01ca
2024-11-11T21:47:35,548 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0129225d5ce7cbf275a23daff9954cc7/info of 0129225d5ce7cbf275a23daff9954cc7 into 0c54b7f406f64fa292257281c55e01ca(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-11T21:47:35,548 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 0129225d5ce7cbf275a23daff9954cc7:
2024-11-11T21:47:35,551 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C41445%2C1731361614438.1731361655551
2024-11-11T21:47:35,557 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T21:47:35,557 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T21:47:35,557 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T21:47:35,557 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T21:47:35,557 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-11T21:47:35,557 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/WALs/22a1f13b90b0,41445,1731361614438/22a1f13b90b0%2C41445%2C1731361614438.1731361645475 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/WALs/22a1f13b90b0,41445,1731361614438/22a1f13b90b0%2C41445%2C1731361614438.1731361655551
2024-11-11T21:47:35,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741839_1015 (size=2520)
2024-11-11T21:47:35,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741839_1015 (size=2520)
2024-11-11T21:47:35,561 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40917:40917),(127.0.0.1/127.0.0.1:37713:37713)]
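The "Rolled WAL" and "Create new FSHLog writer" records above show a WAL roll on region server 22a1f13b90b0,41445,1731361614438. A roll can also be requested explicitly through the public Admin API; the sketch below is illustrative only, assumes cluster configuration is discoverable from the classpath, and reuses the host,port,startcode server name exactly as it appears in the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class RollWal {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // host,port,startcode -- the same encoding used in the WAL directory names above.
          ServerName rs = ServerName.valueOf("22a1f13b90b0,41445,1731361614438");
          // Asks that region server to close its current WAL file and open a new one.
          admin.rollWALWriter(rs);
        }
      }
    }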
2024-11-11T21:47:35,561 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/WALs/22a1f13b90b0,41445,1731361614438/22a1f13b90b0%2C41445%2C1731361614438.1731361614818 to hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/oldWALs/22a1f13b90b0%2C41445%2C1731361614438.1731361614818
2024-11-11T21:47:35,562 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T21:47:35,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-11T21:47:35,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-11T21:47:35,564 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-11T21:47:35,565 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-11T21:47:35,565 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-11T21:47:35,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725
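The flush request above (HMaster ... flush, stored as FlushTableProcedure pid=13 with a FlushRegionProcedure subprocedure) is the master-side view of an admin-initiated table flush. Below is a minimal client-side sketch using the public Admin API; the table name comes from the log, the class name is illustrative, and configuration discovery from the classpath is assumed.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class FlushTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Triggers a table flush on the master, which fans out per-region flush work
          // to the region servers (the pid=13 / pid=14 procedures seen in the log).
          admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
        }
      }
    }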
2024-11-11T21:47:35,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta
2024-11-11T21:47:35,719 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41445 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-11T21:47:35,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7.
2024-11-11T21:47:35,720 INFO [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 0129225d5ce7cbf275a23daff9954cc7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-11T21:47:35,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/.tmp/info/221c5d7b57db4fb89264aada26458202 is 1080, key is row0000/info:/1731361655549/Put/seqid=0
2024-11-11T21:47:35,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741843_1019 (size=6033)
2024-11-11T21:47:35,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741843_1019 (size=6033)
2024-11-11T21:47:35,735 INFO [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/.tmp/info/221c5d7b57db4fb89264aada26458202
2024-11-11T21:47:35,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/.tmp/info/221c5d7b57db4fb89264aada26458202 as hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/221c5d7b57db4fb89264aada26458202
2024-11-11T21:47:35,750 INFO [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/221c5d7b57db4fb89264aada26458202, entries=1, sequenceid=18, filesize=5.9 K
2024-11-11T21:47:35,751 INFO [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 0129225d5ce7cbf275a23daff9954cc7 in 32ms, sequenceid=18, compaction requested=false
2024-11-11T21:47:35,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 0129225d5ce7cbf275a23daff9954cc7:
2024-11-11T21:47:35,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7.
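The flush above wrote a single cell whose key starts with row0000 in the info family. For illustration only, the sketch below shows the kind of client Put that produces such a cell; the row, family, and table names are taken from the log, while the qualifier and value are hypothetical.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class WriteOneRow {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(name)) {
          Put put = new Put(Bytes.toBytes("row0000"));
          // Hypothetical qualifier and value; the log only shows the row key and the family.
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // The cell lands in the memstore until a flush writes it out as an HFile.
          table.put(put);
        }
      }
    }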
2024-11-11T21:47:35,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-11-11T21:47:35,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-11-11T21:47:35,756 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-11-11T21:47:35,756 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 188 msec
2024-11-11T21:47:35,758 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 195 msec
2024-11-11T21:47:35,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659
2024-11-11T21:47:36,170 INFO [master/22a1f13b90b0:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-11T21:47:36,170 INFO [master/22a1f13b90b0:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-11T21:47:36,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta
2024-11-11T21:47:36,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725
2024-11-11T21:47:36,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659
2024-11-11T21:47:37,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta
2024-11-11T21:47:37,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725
2024-11-11T21:47:37,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659
2024-11-11T21:47:38,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta
2024-11-11T21:47:38,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725
2024-11-11T21:47:38,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659
2024-11-11T21:47:39,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725
2024-11-11T21:47:39,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta
2024-11-11T21:47:39,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:40,626 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 0129225d5ce7cbf275a23daff9954cc7, had cached 0 bytes from a total of 14329 2024-11-11T21:47:40,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:40,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:40,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:41,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:41,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:41,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:42,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:42,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:42,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:43,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:43,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:43,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:44,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:44,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:44,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:45,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44805 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-11T21:47:45,593 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-11T21:47:45,596 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C41445%2C1731361614438.1731361665596 2024-11-11T21:47:45,605 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:45,605 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:45,605 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:45,606 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:45,606 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:45,606 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/WALs/22a1f13b90b0,41445,1731361614438/22a1f13b90b0%2C41445%2C1731361614438.1731361655551 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/WALs/22a1f13b90b0,41445,1731361614438/22a1f13b90b0%2C41445%2C1731361614438.1731361665596 2024-11-11T21:47:45,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741842_1018 (size=2026) 2024-11-11T21:47:45,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741842_1018 (size=2026) 2024-11-11T21:47:45,608 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/WALs/22a1f13b90b0,41445,1731361614438/22a1f13b90b0%2C41445%2C1731361614438.1731361645475 to hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/oldWALs/22a1f13b90b0%2C41445%2C1731361614438.1731361645475 2024-11-11T21:47:45,609 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40917:40917),(127.0.0.1/127.0.0.1:37713:37713)] 2024-11-11T21:47:45,610 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T21:47:45,610 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
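The repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" WARNs above come from the lease-recovery path probing DistributedFileSystem.isFileClosed(...) through reflection roughly once per second after the test's DFSClient has already been shut down, so every probe fails with the same wrapped IOException. Below is a minimal, illustrative sketch of that reflective probe-and-retry pattern; it is not the actual RecoverLeaseFSUtils code, and the helper name, the timeout handling, and the 1-second interval are assumptions modeled on the timestamps in this log.

```java
// Hypothetical sketch of the pattern behind the WARN loop above: after issuing
// recoverLease(), the caller polls DistributedFileSystem.isFileClosed(path) via
// reflection and retries when the probe throws. Once the DFSClient is closed,
// every probe surfaces "java.io.IOException: Filesystem closed" wrapped in an
// InvocationTargetException, which is exactly the once-per-second WARN seen here.
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseProbeSketch {

  /** Polls isFileClosed reflectively until it returns true or the deadline passes. */
  static boolean waitForFileClosed(FileSystem fs, Path p, long timeoutMs)
      throws InterruptedException {
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // nothing to recover on non-HDFS filesystems
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    Method isFileClosed;
    try {
      // Reflective lookup so older Hadoop clients without the method still work.
      isFileClosed = dfs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // cannot probe; caller would fall back to plain retries
    }
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if ((Boolean) isFileClosed.invoke(dfs, p)) {
          return true; // lease recovered, file is closed
        }
      } catch (InvocationTargetException | IllegalAccessException e) {
        // A closed DFSClient lands here as InvocationTargetException wrapping
        // "Filesystem closed"; the real code logs a WARN and keeps retrying.
        System.err.println("Failed invocation for " + p + ": " + e.getCause());
      }
      Thread.sleep(1000L); // matches the ~1 s cadence of the WARNs above
    }
    return false;
  }

  private LeaseProbeSketch() {}
}
```

In this log the loop can never succeed, because the filesystem stays closed for good, which is why the same WARN recurs for the three WAL files until the mini cluster finishes shutting down.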
2024-11-11T21:47:45,610 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:47:45,610 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:47:45,610 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:47:45,610 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
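The call stack above shows the teardown path: AbstractTestLogRolling.tearDown() invokes HBaseTestingUtil.shutdownMiniCluster(), which closes the shared AsyncConnection and stops the RPC client. The following is a minimal sketch of that JUnit wiring; only shutdownMiniCluster() and the tearDown hook are confirmed by the stack trace, while the class name, the no-arg constructor, and the @Before counterpart with startMiniCluster() are assumptions.

```java
// Illustrative sketch (not the actual AbstractTestLogRolling source) of the
// @After teardown path visible in the call stack above.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class LogRollingTeardownSketch {

  // Assumed: a shared test utility created per test class.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Assumed counterpart: spins up mini DFS + ZooKeeper + HBase for the test.
    testUtil.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Mirrors AbstractTestLogRolling.tearDown(): shutting down the mini cluster
    // closes the AsyncConnection and stops the RPC client and region server,
    // producing the "Connection has been closed" / "STOPPING region server"
    // lines that follow in this log.
    testUtil.shutdownMiniCluster();
  }
}
```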
2024-11-11T21:47:45,610 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T21:47:45,610 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=688427081, stopped=false 2024-11-11T21:47:45,610 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=22a1f13b90b0,44805,1731361614394 2024-11-11T21:47:45,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T21:47:45,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41445-0x10030c106990001, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T21:47:45,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:47:45,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41445-0x10030c106990001, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:47:45,612 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T21:47:45,612 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T21:47:45,612 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:47:45,612 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:47:45,612 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '22a1f13b90b0,41445,1731361614438' ***** 2024-11-11T21:47:45,612 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T21:47:45,612 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:47:45,613 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41445-0x10030c106990001, quorum=127.0.0.1:56308, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:47:45,615 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T21:47:45,615 INFO [RS:0;22a1f13b90b0:41445 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T21:47:45,615 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T21:47:45,615 INFO [RS:0;22a1f13b90b0:41445 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T21:47:45,615 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.HRegionServer(3091): Received CLOSE for 0129225d5ce7cbf275a23daff9954cc7 2024-11-11T21:47:45,617 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.HRegionServer(959): stopping server 22a1f13b90b0,41445,1731361614438 2024-11-11T21:47:45,617 INFO [RS:0;22a1f13b90b0:41445 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T21:47:45,617 INFO [RS:0;22a1f13b90b0:41445 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;22a1f13b90b0:41445. 
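The ZKWatcher lines above show how the shutdown is broadcast: the master deletes /hbase/running, every watcher receives a NodeDeleted event for that path, and each then re-arms a watch on the now-missing znode. The sketch below reproduces the same watch-a-deleted-znode pattern with the plain ZooKeeper client purely for illustration; the quorum address is the one printed in this log, while the class name, session timeout, and standalone main method are assumed.

```java
// Illustrative only: watching a znode whose deletion signals cluster shutdown,
// analogous to the /hbase/running handling in the ZKWatcher/ZKUtil lines above.
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZnodeWatchSketch {

  public static void main(String[] args) throws Exception {
    String quorum = "127.0.0.1:56308";     // quorum shown in this log
    String runningZnode = "/hbase/running";

    ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> { /* connection events */ });

    Watcher shutdownWatcher = new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        if (event.getType() == Watcher.Event.EventType.NodeDeleted
            && runningZnode.equals(event.getPath())) {
          System.out.println("Cluster shutdown requested: " + event.getPath() + " deleted");
        }
      }
    };

    // exists() arms a one-shot watch whether or not the znode currently exists,
    // which matches "Set watcher on znode that does not yet exist" above.
    zk.exists(runningZnode, shutdownWatcher);

    Thread.sleep(60_000); // keep the session alive long enough to observe the event
    zk.close();
  }
}
```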
2024-11-11T21:47:45,617 DEBUG [RS:0;22a1f13b90b0:41445 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:47:45,617 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0129225d5ce7cbf275a23daff9954cc7, disabling compactions & flushes 2024-11-11T21:47:45,617 DEBUG [RS:0;22a1f13b90b0:41445 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:47:45,617 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7. 2024-11-11T21:47:45,617 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7. 2024-11-11T21:47:45,617 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T21:47:45,617 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7. after waiting 0 ms 2024-11-11T21:47:45,617 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T21:47:45,617 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7. 2024-11-11T21:47:45,617 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-11T21:47:45,617 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 0129225d5ce7cbf275a23daff9954cc7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-11T21:47:45,618 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T21:47:45,629 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-11T21:47:45,629 DEBUG [RS:0;22a1f13b90b0:41445 {}] regionserver.HRegionServer(1325): Online Regions={0129225d5ce7cbf275a23daff9954cc7=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7., 1588230740=hbase:meta,,1.1588230740} 2024-11-11T21:47:45,629 DEBUG [RS:0;22a1f13b90b0:41445 {}] regionserver.HRegionServer(1351): Waiting on 0129225d5ce7cbf275a23daff9954cc7, 1588230740 2024-11-11T21:47:45,629 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T21:47:45,629 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T21:47:45,629 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T21:47:45,629 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T21:47:45,629 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T21:47:45,629 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-11T21:47:45,633 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/.tmp/info/33d5730ed6014709a92297526a923ee6 is 1080, key is row0001/info:/1731361665594/Put/seqid=0 2024-11-11T21:47:45,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741845_1021 (size=6033) 2024-11-11T21:47:45,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741845_1021 (size=6033) 2024-11-11T21:47:45,648 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/.tmp/info/33d5730ed6014709a92297526a923ee6 2024-11-11T21:47:45,655 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740/.tmp/info/31b5cc42901348eba811d44880580295 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7./info:regioninfo/1731361615647/Put/seqid=0 2024-11-11T21:47:45,660 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/.tmp/info/33d5730ed6014709a92297526a923ee6 as hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/33d5730ed6014709a92297526a923ee6 2024-11-11T21:47:45,667 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/33d5730ed6014709a92297526a923ee6, entries=1, sequenceid=22, filesize=5.9 K 2024-11-11T21:47:45,668 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 0129225d5ce7cbf275a23daff9954cc7 in 51ms, sequenceid=22, compaction requested=true 2024-11-11T21:47:45,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741846_1022 (size=7308) 2024-11-11T21:47:45,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741846_1022 (size=7308) 2024-11-11T21:47:45,679 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740/.tmp/info/31b5cc42901348eba811d44880580295 2024-11-11T21:47:45,682 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/0419006cb073424aa5e00099befc1ce6, hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/c75616180605412886d585032b63468d, hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/51590b2ff59b45b2b8d53470d9aa56d2] to archive 2024-11-11T21:47:45,691 INFO [regionserver/22a1f13b90b0:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T21:47:45,691 INFO [regionserver/22a1f13b90b0:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T21:47:45,693 DEBUG 
[StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-11T21:47:45,701 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/0419006cb073424aa5e00099befc1ce6 to hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/0419006cb073424aa5e00099befc1ce6 2024-11-11T21:47:45,704 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/c75616180605412886d585032b63468d to hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/c75616180605412886d585032b63468d 2024-11-11T21:47:45,706 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/51590b2ff59b45b2b8d53470d9aa56d2 to hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/info/51590b2ff59b45b2b8d53470d9aa56d2 2024-11-11T21:47:45,706 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=22a1f13b90b0:44805 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-11T21:47:45,706 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [0419006cb073424aa5e00099befc1ce6=6033, c75616180605412886d585032b63468d=6033, 51590b2ff59b45b2b8d53470d9aa56d2=6033] 2024-11-11T21:47:45,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:45,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:45,716 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740/.tmp/ns/31629adfd8e045b38af4874d58207a2b is 43, key is default/ns:d/1731361615235/Put/seqid=0 2024-11-11T21:47:45,718 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0129225d5ce7cbf275a23daff9954cc7/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-11T21:47:45,718 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7. 
2024-11-11T21:47:45,718 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0129225d5ce7cbf275a23daff9954cc7: Waiting for close lock at 1731361665617Running coprocessor pre-close hooks at 1731361665617Disabling compacts and flushes for region at 1731361665617Disabling writes for close at 1731361665617Obtaining lock to block concurrent updates at 1731361665617Preparing flush snapshotting stores in 0129225d5ce7cbf275a23daff9954cc7 at 1731361665617Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731361665618 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7. at 1731361665619 (+1 ms)Flushing 0129225d5ce7cbf275a23daff9954cc7/info: creating writer at 1731361665619Flushing 0129225d5ce7cbf275a23daff9954cc7/info: appending metadata at 1731361665632 (+13 ms)Flushing 0129225d5ce7cbf275a23daff9954cc7/info: closing flushed file at 1731361665632Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@700236a5: reopening flushed file at 1731361665659 (+27 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 0129225d5ce7cbf275a23daff9954cc7 in 51ms, sequenceid=22, compaction requested=true at 1731361665668 (+9 ms)Writing region close event to WAL at 1731361665709 (+41 ms)Running coprocessor post-close hooks at 1731361665718 (+9 ms)Closed at 1731361665718 2024-11-11T21:47:45,719 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731361615272.0129225d5ce7cbf275a23daff9954cc7. 
2024-11-11T21:47:45,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741847_1023 (size=5153) 2024-11-11T21:47:45,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741847_1023 (size=5153) 2024-11-11T21:47:45,727 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740/.tmp/ns/31629adfd8e045b38af4874d58207a2b 2024-11-11T21:47:45,752 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740/.tmp/table/08b6f102a2a74defb63494cabeaa24a4 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731361615659/Put/seqid=0 2024-11-11T21:47:45,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741848_1024 (size=5508) 2024-11-11T21:47:45,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741848_1024 (size=5508) 2024-11-11T21:47:45,759 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740/.tmp/table/08b6f102a2a74defb63494cabeaa24a4 2024-11-11T21:47:45,768 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740/.tmp/info/31b5cc42901348eba811d44880580295 as hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740/info/31b5cc42901348eba811d44880580295 2024-11-11T21:47:45,774 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740/info/31b5cc42901348eba811d44880580295, entries=10, sequenceid=11, filesize=7.1 K 2024-11-11T21:47:45,776 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740/.tmp/ns/31629adfd8e045b38af4874d58207a2b as hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740/ns/31629adfd8e045b38af4874d58207a2b 2024-11-11T21:47:45,789 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740/ns/31629adfd8e045b38af4874d58207a2b, entries=2, sequenceid=11, filesize=5.0 K 2024-11-11T21:47:45,790 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740/.tmp/table/08b6f102a2a74defb63494cabeaa24a4 as hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740/table/08b6f102a2a74defb63494cabeaa24a4 2024-11-11T21:47:45,798 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740/table/08b6f102a2a74defb63494cabeaa24a4, entries=2, sequenceid=11, filesize=5.4 K 2024-11-11T21:47:45,800 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 170ms, sequenceid=11, compaction requested=false 2024-11-11T21:47:45,809 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-11T21:47:45,810 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T21:47:45,810 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T21:47:45,810 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731361665629Running coprocessor pre-close hooks at 1731361665629Disabling compacts and flushes for region at 1731361665629Disabling writes for close at 1731361665629Obtaining lock to block concurrent updates at 1731361665629Preparing flush snapshotting stores in 1588230740 at 1731361665629Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731361665630 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731361665631 (+1 ms)Flushing 1588230740/info: creating writer at 1731361665631Flushing 1588230740/info: appending metadata at 1731361665655 (+24 ms)Flushing 1588230740/info: closing flushed file at 1731361665655Flushing 1588230740/ns: creating writer at 1731361665694 (+39 ms)Flushing 1588230740/ns: appending metadata at 1731361665716 (+22 ms)Flushing 1588230740/ns: closing flushed file at 1731361665716Flushing 1588230740/table: creating writer at 1731361665734 (+18 ms)Flushing 1588230740/table: appending metadata at 1731361665752 (+18 ms)Flushing 1588230740/table: closing flushed file at 1731361665752Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30644f49: reopening flushed file at 1731361665766 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e98d03d: reopening flushed file at 1731361665775 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5116932e: reopening flushed file at 1731361665789 (+14 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 170ms, sequenceid=11, compaction 
requested=false at 1731361665800 (+11 ms)Writing region close event to WAL at 1731361665806 (+6 ms)Running coprocessor post-close hooks at 1731361665810 (+4 ms)Closed at 1731361665810 2024-11-11T21:47:45,810 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T21:47:45,829 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.HRegionServer(976): stopping server 22a1f13b90b0,41445,1731361614438; all regions closed. 2024-11-11T21:47:45,830 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:45,830 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:45,830 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:45,830 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:45,830 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:45,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741834_1010 (size=3306) 2024-11-11T21:47:45,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741834_1010 (size=3306) 2024-11-11T21:47:45,835 DEBUG [RS:0;22a1f13b90b0:41445 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/oldWALs 2024-11-11T21:47:45,835 INFO [RS:0;22a1f13b90b0:41445 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 22a1f13b90b0%2C41445%2C1731361614438.meta:.meta(num 1731361615193) 2024-11-11T21:47:45,835 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:45,835 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:45,835 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:45,835 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:45,835 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:45,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741844_1020 (size=1252) 2024-11-11T21:47:45,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741844_1020 (size=1252) 2024-11-11T21:47:45,850 DEBUG [RS:0;22a1f13b90b0:41445 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/oldWALs 2024-11-11T21:47:45,850 INFO [RS:0;22a1f13b90b0:41445 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 22a1f13b90b0%2C41445%2C1731361614438:(num 1731361665596) 2024-11-11T21:47:45,850 DEBUG [RS:0;22a1f13b90b0:41445 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:47:45,850 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T21:47:45,850 INFO [RS:0;22a1f13b90b0:41445 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T21:47:45,850 INFO [RS:0;22a1f13b90b0:41445 {}] hbase.ChoreService(370): Chore service for: regionserver/22a1f13b90b0:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-11T21:47:45,850 INFO 
[RS:0;22a1f13b90b0:41445 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T21:47:45,850 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T21:47:45,850 INFO [RS:0;22a1f13b90b0:41445 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41445 2024-11-11T21:47:45,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41445-0x10030c106990001, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/22a1f13b90b0,41445,1731361614438 2024-11-11T21:47:45,853 INFO [RS:0;22a1f13b90b0:41445 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T21:47:45,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T21:47:45,854 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [22a1f13b90b0,41445,1731361614438] 2024-11-11T21:47:45,855 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/22a1f13b90b0,41445,1731361614438 already deleted, retry=false 2024-11-11T21:47:45,855 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 22a1f13b90b0,41445,1731361614438 expired; onlineServers=0 2024-11-11T21:47:45,855 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '22a1f13b90b0,44805,1731361614394' ***** 2024-11-11T21:47:45,855 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T21:47:45,855 INFO [M:0;22a1f13b90b0:44805 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T21:47:45,856 INFO [M:0;22a1f13b90b0:44805 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T21:47:45,856 DEBUG [M:0;22a1f13b90b0:44805 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T21:47:45,856 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T21:47:45,856 DEBUG [M:0;22a1f13b90b0:44805 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T21:47:45,856 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361614579 {}] cleaner.HFileCleaner(306): Exit Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361614579,5,FailOnTimeoutGroup] 2024-11-11T21:47:45,856 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361614579 {}] cleaner.HFileCleaner(306): Exit Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361614579,5,FailOnTimeoutGroup] 2024-11-11T21:47:45,856 INFO [M:0;22a1f13b90b0:44805 {}] hbase.ChoreService(370): Chore service for: master/22a1f13b90b0:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T21:47:45,856 INFO [M:0;22a1f13b90b0:44805 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T21:47:45,856 DEBUG [M:0;22a1f13b90b0:44805 {}] master.HMaster(1795): Stopping service threads 2024-11-11T21:47:45,856 INFO [M:0;22a1f13b90b0:44805 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T21:47:45,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:45,856 INFO [M:0;22a1f13b90b0:44805 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T21:47:45,856 INFO [M:0;22a1f13b90b0:44805 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T21:47:45,856 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T21:47:45,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T21:47:45,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:47:45,858 DEBUG [M:0;22a1f13b90b0:44805 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-11T21:47:45,858 DEBUG [M:0;22a1f13b90b0:44805 {}] master.ActiveMasterManager(353): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-11T21:47:45,859 INFO [M:0;22a1f13b90b0:44805 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/.lastflushedseqids 2024-11-11T21:47:45,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741849_1025 (size=130) 2024-11-11T21:47:45,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741849_1025 (size=130) 2024-11-11T21:47:45,871 INFO [M:0;22a1f13b90b0:44805 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T21:47:45,871 INFO [M:0;22a1f13b90b0:44805 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T21:47:45,871 DEBUG [M:0;22a1f13b90b0:44805 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T21:47:45,871 INFO [M:0;22a1f13b90b0:44805 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:47:45,871 DEBUG [M:0;22a1f13b90b0:44805 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:47:45,871 DEBUG [M:0;22a1f13b90b0:44805 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T21:47:45,871 DEBUG [M:0;22a1f13b90b0:44805 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T21:47:45,871 INFO [M:0;22a1f13b90b0:44805 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.58 KB heapSize=54.99 KB 2024-11-11T21:47:45,890 DEBUG [M:0;22a1f13b90b0:44805 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f0a66c3c53dc48b4a0cb330ba839ee26 is 82, key is hbase:meta,,1/info:regioninfo/1731361615220/Put/seqid=0 2024-11-11T21:47:45,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741850_1026 (size=5672) 2024-11-11T21:47:45,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741850_1026 (size=5672) 2024-11-11T21:47:45,902 INFO [M:0;22a1f13b90b0:44805 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f0a66c3c53dc48b4a0cb330ba839ee26 2024-11-11T21:47:45,930 DEBUG [M:0;22a1f13b90b0:44805 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a5f0031e27fa428e9873496c0bcaaaea is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731361615664/Put/seqid=0 2024-11-11T21:47:45,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741851_1027 (size=7822) 2024-11-11T21:47:45,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741851_1027 (size=7822) 2024-11-11T21:47:45,936 INFO [M:0;22a1f13b90b0:44805 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.98 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a5f0031e27fa428e9873496c0bcaaaea 2024-11-11T21:47:45,942 INFO [M:0;22a1f13b90b0:44805 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a5f0031e27fa428e9873496c0bcaaaea 2024-11-11T21:47:45,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41445-0x10030c106990001, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:47:45,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41445-0x10030c106990001, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:47:45,955 INFO [RS:0;22a1f13b90b0:41445 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T21:47:45,955 INFO [RS:0;22a1f13b90b0:41445 {}] regionserver.HRegionServer(1031): Exiting; stopping=22a1f13b90b0,41445,1731361614438; zookeeper connection closed. 
2024-11-11T21:47:45,955 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5035a733 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5035a733 2024-11-11T21:47:45,955 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T21:47:45,962 DEBUG [M:0;22a1f13b90b0:44805 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c72f277a8226442eb9677f769b34476e is 69, key is 22a1f13b90b0,41445,1731361614438/rs:state/1731361614675/Put/seqid=0 2024-11-11T21:47:45,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741852_1028 (size=5156) 2024-11-11T21:47:45,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741852_1028 (size=5156) 2024-11-11T21:47:45,970 INFO [M:0;22a1f13b90b0:44805 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c72f277a8226442eb9677f769b34476e 2024-11-11T21:47:45,995 DEBUG [M:0;22a1f13b90b0:44805 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5c4f3ced9b01463fb0ece9cc4e33b988 is 52, key is load_balancer_on/state:d/1731361615267/Put/seqid=0 2024-11-11T21:47:46,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741853_1029 (size=5056) 2024-11-11T21:47:46,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741853_1029 (size=5056) 2024-11-11T21:47:46,003 INFO [M:0;22a1f13b90b0:44805 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5c4f3ced9b01463fb0ece9cc4e33b988 2024-11-11T21:47:46,011 DEBUG [M:0;22a1f13b90b0:44805 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f0a66c3c53dc48b4a0cb330ba839ee26 as hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f0a66c3c53dc48b4a0cb330ba839ee26 2024-11-11T21:47:46,016 INFO [M:0;22a1f13b90b0:44805 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f0a66c3c53dc48b4a0cb330ba839ee26, entries=8, sequenceid=121, filesize=5.5 K 2024-11-11T21:47:46,017 DEBUG [M:0;22a1f13b90b0:44805 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a5f0031e27fa428e9873496c0bcaaaea as hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a5f0031e27fa428e9873496c0bcaaaea 2024-11-11T21:47:46,023 INFO [M:0;22a1f13b90b0:44805 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a5f0031e27fa428e9873496c0bcaaaea 2024-11-11T21:47:46,023 INFO [M:0;22a1f13b90b0:44805 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a5f0031e27fa428e9873496c0bcaaaea, entries=14, sequenceid=121, filesize=7.6 K 2024-11-11T21:47:46,024 DEBUG [M:0;22a1f13b90b0:44805 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c72f277a8226442eb9677f769b34476e as hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c72f277a8226442eb9677f769b34476e 2024-11-11T21:47:46,030 INFO [M:0;22a1f13b90b0:44805 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c72f277a8226442eb9677f769b34476e, entries=1, sequenceid=121, filesize=5.0 K 2024-11-11T21:47:46,031 DEBUG [M:0;22a1f13b90b0:44805 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5c4f3ced9b01463fb0ece9cc4e33b988 as hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5c4f3ced9b01463fb0ece9cc4e33b988 2024-11-11T21:47:46,037 INFO [M:0;22a1f13b90b0:44805 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41821/user/jenkins/test-data/027a917d-4839-3912-4507-05b24763640c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5c4f3ced9b01463fb0ece9cc4e33b988, entries=1, sequenceid=121, filesize=4.9 K 2024-11-11T21:47:46,038 INFO [M:0;22a1f13b90b0:44805 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.58 KB/44629, heapSize ~54.93 KB/56248, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 167ms, sequenceid=121, compaction requested=false 2024-11-11T21:47:46,043 INFO [M:0;22a1f13b90b0:44805 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T21:47:46,043 DEBUG [M:0;22a1f13b90b0:44805 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731361665871Disabling compacts and flushes for region at 1731361665871Disabling writes for close at 1731361665871Obtaining lock to block concurrent updates at 1731361665871Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731361665871Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44629, getHeapSize=56248, getOffHeapSize=0, getCellsCount=140 at 1731361665872 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731361665874 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731361665874Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731361665889 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731361665889Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731361665909 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731361665929 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731361665929Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731361665942 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731361665961 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731361665961Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731361665975 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731361665995 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731361665995Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3351aadc: reopening flushed file at 1731361666008 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@72030a1e: reopening flushed file at 1731361666016 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23acfd1f: reopening flushed file at 1731361666023 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32b33613: reopening flushed file at 1731361666030 (+7 ms)Finished flush of dataSize ~43.58 KB/44629, heapSize ~54.93 KB/56248, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 167ms, sequenceid=121, compaction requested=false at 1731361666038 (+8 ms)Writing region close event to WAL at 1731361666042 (+4 ms)Closed at 1731361666042 2024-11-11T21:47:46,043 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:46,043 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:46,043 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:46,043 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:46,043 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:47:46,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35597 is added to blk_1073741830_1006 (size=53026) 2024-11-11T21:47:46,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42237 is added to blk_1073741830_1006 (size=53026) 2024-11-11T21:47:46,046 INFO [M:0;22a1f13b90b0:44805 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-11T21:47:46,046 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T21:47:46,046 INFO [M:0;22a1f13b90b0:44805 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44805 2024-11-11T21:47:46,046 INFO [M:0;22a1f13b90b0:44805 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T21:47:46,147 INFO [M:0;22a1f13b90b0:44805 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T21:47:46,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:47:46,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44805-0x10030c106990000, quorum=127.0.0.1:56308, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:47:46,151 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@677a249b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:47:46,152 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e21aaf2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:47:46,152 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:47:46,152 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4732430a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:47:46,152 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e470e04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/hadoop.log.dir/,STOPPED} 2024-11-11T21:47:46,154 WARN [BP-614608524-172.17.0.2-1731361613846 heartbeating to localhost/127.0.0.1:41821 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T21:47:46,154 WARN [BP-614608524-172.17.0.2-1731361613846 heartbeating to localhost/127.0.0.1:41821 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-614608524-172.17.0.2-1731361613846 (Datanode Uuid 078222af-057b-4223-94e8-67d37ec73a4d) service to localhost/127.0.0.1:41821 2024-11-11T21:47:46,155 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/cluster_8e92d297-9568-c07d-cc6a-c9bf617f5ee4/data/data3/current/BP-614608524-172.17.0.2-1731361613846 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:47:46,155 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/cluster_8e92d297-9568-c07d-cc6a-c9bf617f5ee4/data/data4/current/BP-614608524-172.17.0.2-1731361613846 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:47:46,155 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T21:47:46,155 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T21:47:46,156 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T21:47:46,162 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4114613b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:47:46,163 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21edc7fd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:47:46,163 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:47:46,163 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f02078{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:47:46,163 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@214c2124{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/hadoop.log.dir/,STOPPED} 2024-11-11T21:47:46,165 WARN [BP-614608524-172.17.0.2-1731361613846 heartbeating to localhost/127.0.0.1:41821 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T21:47:46,165 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T21:47:46,165 WARN [BP-614608524-172.17.0.2-1731361613846 heartbeating to localhost/127.0.0.1:41821 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-614608524-172.17.0.2-1731361613846 (Datanode Uuid 5a4c6fab-11bb-4f6b-853b-3fdc6147bdf7) service to localhost/127.0.0.1:41821 2024-11-11T21:47:46,165 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T21:47:46,165 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/cluster_8e92d297-9568-c07d-cc6a-c9bf617f5ee4/data/data1/current/BP-614608524-172.17.0.2-1731361613846 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:47:46,166 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/cluster_8e92d297-9568-c07d-cc6a-c9bf617f5ee4/data/data2/current/BP-614608524-172.17.0.2-1731361613846 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:47:46,166 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T21:47:46,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@16208fe2{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T21:47:46,173 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a5372a5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:47:46,173 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:47:46,173 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@438136f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:47:46,173 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a7d1da5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/hadoop.log.dir/,STOPPED} 2024-11-11T21:47:46,183 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T21:47:46,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T21:47:46,218 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=207 (was 180) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41821 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:41821 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41821 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:41821 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41821 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/22a1f13b90b0:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41821 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:41821 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:41821 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41821 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=50 (was 61), ProcessCount=11 (was 11), AvailableMemoryMB=5276 (was 4209) - AvailableMemoryMB LEAK? - 2024-11-11T21:47:46,226 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=207, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=50, ProcessCount=11, AvailableMemoryMB=5276 2024-11-11T21:47:46,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T21:47:46,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/hadoop.log.dir so I do NOT create it in target/test-data/50339f40-5804-6a50-dc0b-928acb431703 2024-11-11T21:47:46,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/a36eba9d-9ec3-87b6-e299-35f95d4558e9/hadoop.tmp.dir so I do NOT create it in target/test-data/50339f40-5804-6a50-dc0b-928acb431703 2024-11-11T21:47:46,226 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/cluster_4180ed5a-cb94-18b5-86e8-951533b2a76c, deleteOnExit=true 2024-11-11T21:47:46,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T21:47:46,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/test.cache.data in system properties and HBase conf 2024-11-11T21:47:46,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T21:47:46,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/hadoop.log.dir in system properties and HBase conf 2024-11-11T21:47:46,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/mapreduce.cluster.local.dir in system properties and HBase conf 
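The ResourceChecker summary above compares thread counts, open file descriptors, load average and memory before and after each test and flags anything new as a potential leak. A rough JDK-only sketch of that kind of before/after thread accounting, for illustration only (not the HBase ResourceChecker implementation, which also tracks file descriptors and process counts):

    import java.util.HashSet;
    import java.util.Set;

    // Illustrative before/after snapshot in the spirit of the
    // "Thread=207 (was 180) ... Thread LEAK?" output above.
    public class ThreadSnapshot {
      static Set<String> liveThreadNames() {
        Set<String> names = new HashSet<>();
        for (Thread t : Thread.getAllStackTraces().keySet()) {
          names.add(t.getName());
        }
        return names;
      }

      public static void main(String[] args) {
        Set<String> before = liveThreadNames();
        // ... run the test body here ...
        Set<String> after = liveThreadNames();
        System.out.println("Thread=" + after.size() + " (was " + before.size() + ")");
        after.removeAll(before);
        for (String name : after) {
          System.out.println("Potentially hanging thread: " + name);
        }
      }
    }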
2024-11-11T21:47:46,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T21:47:46,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T21:47:46,227 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-11T21:47:46,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T21:47:46,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T21:47:46,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T21:47:46,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T21:47:46,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T21:47:46,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T21:47:46,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T21:47:46,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T21:47:46,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/dfs.datanode.shared.file.descriptor.paths in 
system properties and HBase conf 2024-11-11T21:47:46,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/nfs.dump.dir in system properties and HBase conf 2024-11-11T21:47:46,229 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/java.io.tmpdir in system properties and HBase conf 2024-11-11T21:47:46,229 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T21:47:46,229 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T21:47:46,229 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T21:47:46,247 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T21:47:46,301 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:47:46,307 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:47:46,308 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:47:46,308 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:47:46,308 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T21:47:46,309 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:47:46,310 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10ce7a76{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:47:46,311 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56433553{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:47:46,414 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@52d230c9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/java.io.tmpdir/jetty-localhost-35743-hadoop-hdfs-3_4_1-tests_jar-_-any-11748518489358263351/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T21:47:46,415 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@240fc28c{HTTP/1.1, (http/1.1)}{localhost:35743} 2024-11-11T21:47:46,415 INFO [Time-limited test {}] server.Server(415): Started @240814ms 2024-11-11T21:47:46,427 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T21:47:46,544 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:47:46,547 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:47:46,562 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:47:46,562 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:47:46,562 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T21:47:46,562 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a48d3d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:47:46,563 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@53cff5cb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:47:46,681 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@45890504{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/java.io.tmpdir/jetty-localhost-42893-hadoop-hdfs-3_4_1-tests_jar-_-any-17233964142155681315/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:47:46,682 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3d639fc0{HTTP/1.1, (http/1.1)}{localhost:42893} 2024-11-11T21:47:46,682 INFO [Time-limited test {}] server.Server(415): Started @241081ms 2024-11-11T21:47:46,683 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T21:47:46,689 INFO [regionserver/22a1f13b90b0:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T21:47:46,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:46,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:46,726 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:47:46,731 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:47:46,733 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:47:46,733 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:47:46,733 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T21:47:46,735 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7305dd28{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:47:46,736 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1dab95de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:47:46,751 WARN [Thread-1952 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/cluster_4180ed5a-cb94-18b5-86e8-951533b2a76c/data/data1/current/BP-1724013944-172.17.0.2-1731361666251/current, will proceed with Du for space computation calculation, 2024-11-11T21:47:46,761 WARN [Thread-1953 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/cluster_4180ed5a-cb94-18b5-86e8-951533b2a76c/data/data2/current/BP-1724013944-172.17.0.2-1731361666251/current, will proceed with Du for space computation calculation, 2024-11-11T21:47:46,781 WARN [Thread-1931 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T21:47:46,787 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4a7db65b88aea82b with lease ID 0x4d6d8755595a2a51: Processing first storage report for DS-82d49338-5d3c-4449-9b1b-65c46e3a349d from datanode DatanodeRegistration(127.0.0.1:36647, datanodeUuid=462bc2be-9f40-4149-b1c2-b792bfaa9f15, infoPort=43015, infoSecurePort=0, ipcPort=41727, storageInfo=lv=-57;cid=testClusterID;nsid=1860894518;c=1731361666251) 2024-11-11T21:47:46,787 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4a7db65b88aea82b with lease ID 0x4d6d8755595a2a51: from storage DS-82d49338-5d3c-4449-9b1b-65c46e3a349d node DatanodeRegistration(127.0.0.1:36647, datanodeUuid=462bc2be-9f40-4149-b1c2-b792bfaa9f15, infoPort=43015, infoSecurePort=0, ipcPort=41727, storageInfo=lv=-57;cid=testClusterID;nsid=1860894518;c=1731361666251), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:47:46,787 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4a7db65b88aea82b with lease ID 0x4d6d8755595a2a51: Processing first storage report for DS-eeb34640-a058-4dc7-93df-af5905258453 from datanode DatanodeRegistration(127.0.0.1:36647, datanodeUuid=462bc2be-9f40-4149-b1c2-b792bfaa9f15, infoPort=43015, infoSecurePort=0, ipcPort=41727, storageInfo=lv=-57;cid=testClusterID;nsid=1860894518;c=1731361666251) 2024-11-11T21:47:46,787 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4a7db65b88aea82b with lease ID 0x4d6d8755595a2a51: from storage DS-eeb34640-a058-4dc7-93df-af5905258453 node DatanodeRegistration(127.0.0.1:36647, datanodeUuid=462bc2be-9f40-4149-b1c2-b792bfaa9f15, infoPort=43015, infoSecurePort=0, ipcPort=41727, storageInfo=lv=-57;cid=testClusterID;nsid=1860894518;c=1731361666251), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:47:46,852 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@9612b29{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/java.io.tmpdir/jetty-localhost-41029-hadoop-hdfs-3_4_1-tests_jar-_-any-2536704047830467859/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:47:46,852 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@314e7370{HTTP/1.1, (http/1.1)}{localhost:41029} 2024-11-11T21:47:46,852 INFO [Time-limited test {}] server.Server(415): Started @241251ms 2024-11-11T21:47:46,854 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T21:47:46,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:46,924 WARN [Thread-1978 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/cluster_4180ed5a-cb94-18b5-86e8-951533b2a76c/data/data3/current/BP-1724013944-172.17.0.2-1731361666251/current, will proceed with Du for space computation calculation, 2024-11-11T21:47:46,924 WARN [Thread-1979 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/cluster_4180ed5a-cb94-18b5-86e8-951533b2a76c/data/data4/current/BP-1724013944-172.17.0.2-1731361666251/current, will proceed with Du for space computation calculation, 2024-11-11T21:47:46,949 WARN [Thread-1967 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T21:47:46,951 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x284ef9661b956703 with lease ID 0x4d6d8755595a2a52: Processing first storage report for DS-8289a6ed-5045-4487-9cb5-b99f4e068d4c from datanode DatanodeRegistration(127.0.0.1:43361, datanodeUuid=a86f15ed-e888-4cc1-907f-709446a36319, infoPort=33915, infoSecurePort=0, ipcPort=43379, storageInfo=lv=-57;cid=testClusterID;nsid=1860894518;c=1731361666251) 2024-11-11T21:47:46,951 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x284ef9661b956703 with lease ID 0x4d6d8755595a2a52: from storage DS-8289a6ed-5045-4487-9cb5-b99f4e068d4c node DatanodeRegistration(127.0.0.1:43361, datanodeUuid=a86f15ed-e888-4cc1-907f-709446a36319, infoPort=33915, infoSecurePort=0, ipcPort=43379, storageInfo=lv=-57;cid=testClusterID;nsid=1860894518;c=1731361666251), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:47:46,951 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x284ef9661b956703 with lease ID 0x4d6d8755595a2a52: Processing first storage report for DS-71fe83bd-0f58-4c18-aeb3-cdcdfdd5875a from datanode DatanodeRegistration(127.0.0.1:43361, datanodeUuid=a86f15ed-e888-4cc1-907f-709446a36319, infoPort=33915, infoSecurePort=0, ipcPort=43379, storageInfo=lv=-57;cid=testClusterID;nsid=1860894518;c=1731361666251) 2024-11-11T21:47:46,951 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x284ef9661b956703 with lease ID 0x4d6d8755595a2a52: from storage DS-71fe83bd-0f58-4c18-aeb3-cdcdfdd5875a node DatanodeRegistration(127.0.0.1:43361, datanodeUuid=a86f15ed-e888-4cc1-907f-709446a36319, infoPort=33915, infoSecurePort=0, ipcPort=43379, storageInfo=lv=-57;cid=testClusterID;nsid=1860894518;c=1731361666251), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T21:47:46,997 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703 2024-11-11T21:47:47,001 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/cluster_4180ed5a-cb94-18b5-86e8-951533b2a76c/zookeeper_0, clientPort=58844, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/cluster_4180ed5a-cb94-18b5-86e8-951533b2a76c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/cluster_4180ed5a-cb94-18b5-86e8-951533b2a76c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T21:47:47,002 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58844 2024-11-11T21:47:47,002 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:47:47,004 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:47:47,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741825_1001 (size=7) 2024-11-11T21:47:47,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741825_1001 (size=7) 2024-11-11T21:47:47,022 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381 with version=8 2024-11-11T21:47:47,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/hbase-staging 2024-11-11T21:47:47,025 INFO [Time-limited test {}] client.ConnectionUtils(128): master/22a1f13b90b0:0 server-side Connection retries=45 2024-11-11T21:47:47,025 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:47:47,025 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T21:47:47,025 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T21:47:47,025 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:47:47,025 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T21:47:47,025 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T21:47:47,025 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T21:47:47,028 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45953 2024-11-11T21:47:47,030 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45953 connecting to ZooKeeper ensemble=127.0.0.1:58844 2024-11-11T21:47:47,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:459530x0, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T21:47:47,039 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45953-0x10030c1d4250000 connected 2024-11-11T21:47:47,067 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:47:47,069 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:47:47,074 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:47:47,074 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381, hbase.cluster.distributed=false 2024-11-11T21:47:47,075 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T21:47:47,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45953 2024-11-11T21:47:47,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45953 2024-11-11T21:47:47,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45953 2024-11-11T21:47:47,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45953 2024-11-11T21:47:47,077 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45953 2024-11-11T21:47:47,093 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/22a1f13b90b0:0 server-side Connection retries=45 2024-11-11T21:47:47,093 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:47:47,093 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T21:47:47,093 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T21:47:47,093 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:47:47,093 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T21:47:47,093 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T21:47:47,093 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T21:47:47,096 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33459 2024-11-11T21:47:47,098 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33459 connecting to ZooKeeper ensemble=127.0.0.1:58844 2024-11-11T21:47:47,098 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:47:47,100 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:47:47,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:334590x0, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T21:47:47,106 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:334590x0, quorum=127.0.0.1:58844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:47:47,106 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33459-0x10030c1d4250001 connected 2024-11-11T21:47:47,106 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T21:47:47,108 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T21:47:47,109 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33459-0x10030c1d4250001, quorum=127.0.0.1:58844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T21:47:47,110 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33459-0x10030c1d4250001, quorum=127.0.0.1:58844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T21:47:47,113 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33459 2024-11-11T21:47:47,113 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33459 2024-11-11T21:47:47,113 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33459 2024-11-11T21:47:47,113 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33459 2024-11-11T21:47:47,114 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33459 2024-11-11T21:47:47,125 DEBUG [M:0;22a1f13b90b0:45953 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;22a1f13b90b0:45953 2024-11-11T21:47:47,126 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/22a1f13b90b0,45953,1731361667024 2024-11-11T21:47:47,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33459-0x10030c1d4250001, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:47:47,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:47:47,128 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/22a1f13b90b0,45953,1731361667024 2024-11-11T21:47:47,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33459-0x10030c1d4250001, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T21:47:47,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:47:47,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33459-0x10030c1d4250001, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:47:47,129 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T21:47:47,129 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/22a1f13b90b0,45953,1731361667024 from backup master directory 2024-11-11T21:47:47,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/22a1f13b90b0,45953,1731361667024 2024-11-11T21:47:47,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33459-0x10030c1d4250001, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:47:47,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:47:47,130 WARN [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-11T21:47:47,130 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=22a1f13b90b0,45953,1731361667024 2024-11-11T21:47:47,134 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/hbase.id] with ID: 04ecebca-42e1-4eb8-87be-40056e2bdc0d 2024-11-11T21:47:47,134 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/.tmp/hbase.id 2024-11-11T21:47:47,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741826_1002 (size=42) 2024-11-11T21:47:47,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741826_1002 (size=42) 2024-11-11T21:47:47,143 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/.tmp/hbase.id]:[hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/hbase.id] 2024-11-11T21:47:47,157 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:47:47,157 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T21:47:47,158 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-11T21:47:47,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33459-0x10030c1d4250001, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:47:47,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:47:47,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741827_1003 (size=196) 2024-11-11T21:47:47,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741827_1003 (size=196) 2024-11-11T21:47:47,176 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T21:47:47,177 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T21:47:47,177 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:47:47,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741828_1004 (size=1189) 2024-11-11T21:47:47,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741828_1004 (size=1189) 2024-11-11T21:47:47,186 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store 2024-11-11T21:47:47,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741829_1005 (size=34) 2024-11-11T21:47:47,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741829_1005 (size=34) 2024-11-11T21:47:47,200 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:47:47,200 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T21:47:47,200 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:47:47,200 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:47:47,200 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T21:47:47,200 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:47:47,200 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T21:47:47,200 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731361667200Disabling compacts and flushes for region at 1731361667200Disabling writes for close at 1731361667200Writing region close event to WAL at 1731361667200Closed at 1731361667200 2024-11-11T21:47:47,201 WARN [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/.initializing 2024-11-11T21:47:47,201 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/WALs/22a1f13b90b0,45953,1731361667024 2024-11-11T21:47:47,204 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C45953%2C1731361667024, suffix=, logDir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/WALs/22a1f13b90b0,45953,1731361667024, archiveDir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/oldWALs, maxLogs=10 2024-11-11T21:47:47,205 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C45953%2C1731361667024.1731361667205 2024-11-11T21:47:47,210 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/WALs/22a1f13b90b0,45953,1731361667024/22a1f13b90b0%2C45953%2C1731361667024.1731361667205 2024-11-11T21:47:47,213 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43015:43015),(127.0.0.1/127.0.0.1:33915:33915)] 2024-11-11T21:47:47,214 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T21:47:47,214 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:47:47,214 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:47:47,214 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:47:47,215 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:47:47,217 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T21:47:47,217 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:47:47,217 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:47:47,217 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:47:47,218 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T21:47:47,218 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:47:47,219 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:47:47,219 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:47:47,220 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T21:47:47,220 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:47:47,220 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:47:47,221 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:47:47,222 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T21:47:47,222 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:47:47,223 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:47:47,223 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:47:47,224 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:47:47,224 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:47:47,225 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:47:47,225 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:47:47,226 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T21:47:47,227 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:47:47,230 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T21:47:47,230 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=853716, jitterRate=0.08555677533149719}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T21:47:47,231 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731361667214Initializing all the Stores at 1731361667215 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361667215Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361667215Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361667215Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361667215Cleaning up temporary data from old regions at 1731361667225 (+10 ms)Region opened successfully at 1731361667231 (+6 ms) 2024-11-11T21:47:47,233 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T21:47:47,236 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58a82b07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=22a1f13b90b0/172.17.0.2:0 2024-11-11T21:47:47,237 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T21:47:47,237 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T21:47:47,238 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T21:47:47,238 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T21:47:47,238 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T21:47:47,239 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T21:47:47,239 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T21:47:47,243 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T21:47:47,243 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T21:47:47,244 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T21:47:47,245 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T21:47:47,246 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T21:47:47,246 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T21:47:47,247 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T21:47:47,250 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T21:47:47,250 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T21:47:47,252 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T21:47:47,252 DEBUG 
[master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T21:47:47,254 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T21:47:47,255 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T21:47:47,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33459-0x10030c1d4250001, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T21:47:47,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33459-0x10030c1d4250001, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:47:47,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T21:47:47,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:47:47,258 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=22a1f13b90b0,45953,1731361667024, sessionid=0x10030c1d4250000, setting cluster-up flag (Was=false) 2024-11-11T21:47:47,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:47:47,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33459-0x10030c1d4250001, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:47:47,271 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T21:47:47,272 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=22a1f13b90b0,45953,1731361667024 2024-11-11T21:47:47,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:47:47,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33459-0x10030c1d4250001, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:47:47,278 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T21:47:47,279 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=22a1f13b90b0,45953,1731361667024 2024-11-11T21:47:47,280 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T21:47:47,286 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T21:47:47,286 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T21:47:47,287 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T21:47:47,287 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 22a1f13b90b0,45953,1731361667024 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T21:47:47,289 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:47:47,289 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:47:47,289 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:47:47,289 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:47:47,289 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/22a1f13b90b0:0, corePoolSize=10, maxPoolSize=10 2024-11-11T21:47:47,289 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:47:47,289 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=2, maxPoolSize=2 2024-11-11T21:47:47,289 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T21:47:47,299 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:47:47,299 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731361697299 2024-11-11T21:47:47,299 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T21:47:47,299 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T21:47:47,299 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T21:47:47,299 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T21:47:47,299 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T21:47:47,299 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T21:47:47,299 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T21:47:47,300 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:47:47,300 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T21:47:47,301 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T21:47:47,305 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T21:47:47,305 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T21:47:47,305 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T21:47:47,308 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T21:47:47,308 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T21:47:47,309 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361667309,5,FailOnTimeoutGroup] 2024-11-11T21:47:47,311 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361667309,5,FailOnTimeoutGroup] 2024-11-11T21:47:47,311 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T21:47:47,311 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T21:47:47,311 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T21:47:47,311 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-11T21:47:47,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741831_1007 (size=1321) 2024-11-11T21:47:47,317 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.HRegionServer(746): ClusterId : 04ecebca-42e1-4eb8-87be-40056e2bdc0d 2024-11-11T21:47:47,317 DEBUG [RS:0;22a1f13b90b0:33459 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T21:47:47,318 DEBUG [RS:0;22a1f13b90b0:33459 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T21:47:47,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741831_1007 (size=1321) 2024-11-11T21:47:47,319 DEBUG [RS:0;22a1f13b90b0:33459 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T21:47:47,320 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T21:47:47,320 DEBUG [RS:0;22a1f13b90b0:33459 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T21:47:47,320 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381 2024-11-11T21:47:47,320 DEBUG [RS:0;22a1f13b90b0:33459 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bbf72d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=22a1f13b90b0/172.17.0.2:0 2024-11-11T21:47:47,333 DEBUG [RS:0;22a1f13b90b0:33459 {}] regionserver.ShutdownHook(81): 
Installed shutdown hook thread: Shutdownhook:RS:0;22a1f13b90b0:33459 2024-11-11T21:47:47,333 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T21:47:47,333 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T21:47:47,333 DEBUG [RS:0;22a1f13b90b0:33459 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-11T21:47:47,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741832_1008 (size=32) 2024-11-11T21:47:47,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741832_1008 (size=32) 2024-11-11T21:47:47,335 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.HRegionServer(2659): reportForDuty to master=22a1f13b90b0,45953,1731361667024 with port=33459, startcode=1731361667092 2024-11-11T21:47:47,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:47:47,335 DEBUG [RS:0;22a1f13b90b0:33459 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T21:47:47,337 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T21:47:47,339 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T21:47:47,339 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:47:47,339 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57571, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T21:47:47,339 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:47:47,340 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T21:47:47,340 INFO 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45953 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 22a1f13b90b0,33459,1731361667092 2024-11-11T21:47:47,340 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45953 {}] master.ServerManager(517): Registering regionserver=22a1f13b90b0,33459,1731361667092 2024-11-11T21:47:47,341 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T21:47:47,341 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:47:47,342 DEBUG [RS:0;22a1f13b90b0:33459 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381 2024-11-11T21:47:47,342 DEBUG [RS:0;22a1f13b90b0:33459 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38643 2024-11-11T21:47:47,342 DEBUG [RS:0;22a1f13b90b0:33459 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T21:47:47,342 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:47:47,342 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T21:47:47,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T21:47:47,344 DEBUG [RS:0;22a1f13b90b0:33459 {}] zookeeper.ZKUtil(111): regionserver:33459-0x10030c1d4250001, quorum=127.0.0.1:58844, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/22a1f13b90b0,33459,1731361667092 2024-11-11T21:47:47,344 WARN [RS:0;22a1f13b90b0:33459 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-11T21:47:47,344 INFO [RS:0;22a1f13b90b0:33459 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:47:47,344 DEBUG [RS:0;22a1f13b90b0:33459 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/WALs/22a1f13b90b0,33459,1731361667092 2024-11-11T21:47:47,344 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T21:47:47,344 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:47:47,345 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:47:47,345 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [22a1f13b90b0,33459,1731361667092] 2024-11-11T21:47:47,345 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T21:47:47,346 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T21:47:47,347 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:47:47,347 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:47:47,347 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T21:47:47,348 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits 
file(s) under hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740 2024-11-11T21:47:47,348 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740 2024-11-11T21:47:47,349 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T21:47:47,349 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T21:47:47,350 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T21:47:47,352 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T21:47:47,352 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T21:47:47,357 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T21:47:47,357 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T21:47:47,357 INFO [RS:0;22a1f13b90b0:33459 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T21:47:47,357 INFO [RS:0;22a1f13b90b0:33459 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-11T21:47:47,358 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856725, jitterRate=0.08938281238079071}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T21:47:47,358 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731361667335Initializing all the Stores at 1731361667336 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361667337 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361667337Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361667337Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361667337Cleaning up temporary data from old regions at 1731361667349 (+12 ms)Region opened successfully at 1731361667358 (+9 ms) 2024-11-11T21:47:47,358 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T21:47:47,358 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T21:47:47,358 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T21:47:47,358 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T21:47:47,358 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T21:47:47,361 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T21:47:47,362 INFO [RS:0;22a1f13b90b0:33459 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T21:47:47,362 INFO [RS:0;22a1f13b90b0:33459 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
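The "Region open journal" entry above records each step as an epoch-millisecond timestamp (for example 1731361667335 and 1731361667358) plus a relative "(+N ms)" delta. A small, self-contained illustration of how those raw values line up with the wall-clock timestamps in this log (the test JVM appears to run in UTC, since 1731361667358 maps onto 21:47:47,358):

    import java.time.Instant;
    import java.time.ZoneOffset;
    import java.time.format.DateTimeFormatter;

    public class JournalTimestamps {
      public static void main(String[] args) {
        long started = 1731361667335L; // "Writing region info on filesystem at 1731361667335"
        long opened  = 1731361667358L; // "Region opened successfully at 1731361667358"
        DateTimeFormatter fmt =
            DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss,SSS").withZone(ZoneOffset.UTC);
        System.out.println(fmt.format(Instant.ofEpochMilli(opened))); // 2024-11-11T21:47:47,358
        System.out.println("open took " + (opened - started) + " ms"); // 23 ms, the sum of the (+N ms) deltas
      }
    }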
2024-11-11T21:47:47,362 DEBUG [RS:0;22a1f13b90b0:33459 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:47:47,362 DEBUG [RS:0;22a1f13b90b0:33459 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:47:47,362 DEBUG [RS:0;22a1f13b90b0:33459 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:47:47,362 DEBUG [RS:0;22a1f13b90b0:33459 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:47:47,362 DEBUG [RS:0;22a1f13b90b0:33459 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:47:47,362 DEBUG [RS:0;22a1f13b90b0:33459 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/22a1f13b90b0:0, corePoolSize=2, maxPoolSize=2 2024-11-11T21:47:47,363 DEBUG [RS:0;22a1f13b90b0:33459 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:47:47,363 DEBUG [RS:0;22a1f13b90b0:33459 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:47:47,363 DEBUG [RS:0;22a1f13b90b0:33459 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:47:47,363 DEBUG [RS:0;22a1f13b90b0:33459 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:47:47,363 DEBUG [RS:0;22a1f13b90b0:33459 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:47:47,363 DEBUG [RS:0;22a1f13b90b0:33459 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:47:47,363 DEBUG [RS:0;22a1f13b90b0:33459 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/22a1f13b90b0:0, corePoolSize=3, maxPoolSize=3 2024-11-11T21:47:47,363 DEBUG [RS:0;22a1f13b90b0:33459 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0, corePoolSize=3, maxPoolSize=3 2024-11-11T21:47:47,364 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T21:47:47,364 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731361667358Disabling compacts and flushes for region at 1731361667358Disabling writes for close at 1731361667358Writing region close event to WAL at 1731361667364 (+6 ms)Closed at 1731361667364 2024-11-11T21:47:47,365 INFO [RS:0;22a1f13b90b0:33459 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
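Each "Starting executor service" line above registers a small fixed-size pool keyed by event type (RS_OPEN_REGION, RS_CLOSE_META, and so on) with equal core and max pool sizes. As a rough standard-library analogue, not HBase's own ExecutorService class, a corePoolSize=1/maxPoolSize=1 pool looks like this:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class RegionOpenPoolSketch {
      public static void main(String[] args) {
        // Analogue of "name=RS_OPEN_REGION-..., corePoolSize=1, maxPoolSize=1":
        // one core thread, no burst threads, unbounded work queue.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
            r -> new Thread(r, "RS_OPEN_REGION-sketch"));
        pool.submit(() -> System.out.println("ran on " + Thread.currentThread().getName()));
        pool.shutdown();
      }
    }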
2024-11-11T21:47:47,365 INFO [RS:0;22a1f13b90b0:33459 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T21:47:47,365 INFO [RS:0;22a1f13b90b0:33459 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:47:47,365 INFO [RS:0;22a1f13b90b0:33459 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T21:47:47,365 INFO [RS:0;22a1f13b90b0:33459 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T21:47:47,365 INFO [RS:0;22a1f13b90b0:33459 {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,33459,1731361667092-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T21:47:47,365 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:47:47,365 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T21:47:47,369 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T21:47:47,374 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T21:47:47,375 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T21:47:47,388 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T21:47:47,389 INFO [RS:0;22a1f13b90b0:33459 {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,33459,1731361667092-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:47:47,389 INFO [RS:0;22a1f13b90b0:33459 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:47:47,389 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.Replication(171): 22a1f13b90b0,33459,1731361667092 started 2024-11-11T21:47:47,409 INFO [RS:0;22a1f13b90b0:33459 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
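The ChoreService(168) lines register periodic background tasks, each with a name, a period and a time unit. A plain java.util.concurrent sketch of the same idea (an analogy only, not HBase's ScheduledChore API):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
      public static void main(String[] args) {
        ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
        // Analogue of "name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled."
        chores.scheduleAtFixedRate(
            () -> System.out.println("flush check at " + System.currentTimeMillis()),
            1000, 1000, TimeUnit.MILLISECONDS);
        // The real chores run until server shutdown; here we simply stop after a few seconds.
        chores.schedule(chores::shutdown, 5, TimeUnit.SECONDS);
      }
    }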
2024-11-11T21:47:47,409 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.HRegionServer(1482): Serving as 22a1f13b90b0,33459,1731361667092, RpcServer on 22a1f13b90b0/172.17.0.2:33459, sessionid=0x10030c1d4250001 2024-11-11T21:47:47,410 DEBUG [RS:0;22a1f13b90b0:33459 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T21:47:47,410 DEBUG [RS:0;22a1f13b90b0:33459 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 22a1f13b90b0,33459,1731361667092 2024-11-11T21:47:47,410 DEBUG [RS:0;22a1f13b90b0:33459 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '22a1f13b90b0,33459,1731361667092' 2024-11-11T21:47:47,410 DEBUG [RS:0;22a1f13b90b0:33459 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T21:47:47,410 DEBUG [RS:0;22a1f13b90b0:33459 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T21:47:47,411 DEBUG [RS:0;22a1f13b90b0:33459 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T21:47:47,411 DEBUG [RS:0;22a1f13b90b0:33459 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T21:47:47,411 DEBUG [RS:0;22a1f13b90b0:33459 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 22a1f13b90b0,33459,1731361667092 2024-11-11T21:47:47,411 DEBUG [RS:0;22a1f13b90b0:33459 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '22a1f13b90b0,33459,1731361667092' 2024-11-11T21:47:47,411 DEBUG [RS:0;22a1f13b90b0:33459 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T21:47:47,411 DEBUG [RS:0;22a1f13b90b0:33459 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T21:47:47,412 DEBUG [RS:0;22a1f13b90b0:33459 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T21:47:47,412 INFO [RS:0;22a1f13b90b0:33459 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T21:47:47,412 INFO [RS:0;22a1f13b90b0:33459 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T21:47:47,514 INFO [RS:0;22a1f13b90b0:33459 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C33459%2C1731361667092, suffix=, logDir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/WALs/22a1f13b90b0,33459,1731361667092, archiveDir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/oldWALs, maxLogs=32 2024-11-11T21:47:47,514 INFO [RS:0;22a1f13b90b0:33459 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C33459%2C1731361667092.1731361667514 2024-11-11T21:47:47,524 INFO [RS:0;22a1f13b90b0:33459 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/WALs/22a1f13b90b0,33459,1731361667092/22a1f13b90b0%2C33459%2C1731361667092.1731361667514 2024-11-11T21:47:47,526 WARN [22a1f13b90b0:45953 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
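The WAL file created above is named from the server identity with commas percent-encoded (22a1f13b90b0%2C33459%2C1731361667092.1731361667514). A short, self-contained sketch of decoding such a name back into host, port, start code and roll timestamp:

    import java.net.URLDecoder;
    import java.nio.charset.StandardCharsets;

    public class WalNameDecode {
      public static void main(String[] args) {
        String walName = "22a1f13b90b0%2C33459%2C1731361667092.1731361667514";
        String decoded = URLDecoder.decode(walName, StandardCharsets.UTF_8);
        // decoded = "22a1f13b90b0,33459,1731361667092.1731361667514"
        String serverName = decoded.substring(0, decoded.lastIndexOf('.'));
        String rollTimestamp = decoded.substring(decoded.lastIndexOf('.') + 1);
        String[] parts = serverName.split(",");
        System.out.println("host=" + parts[0] + " port=" + parts[1]
            + " startcode=" + parts[2] + " walTimestamp=" + rollTimestamp);
      }
    }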
2024-11-11T21:47:47,531 DEBUG [RS:0;22a1f13b90b0:33459 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43015:43015),(127.0.0.1/127.0.0.1:33915:33915)] 2024-11-11T21:47:47,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:47,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:47,776 DEBUG [22a1f13b90b0:45953 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T21:47:47,776 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=22a1f13b90b0,33459,1731361667092 2024-11-11T21:47:47,778 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 22a1f13b90b0,33459,1731361667092, state=OPENING 2024-11-11T21:47:47,779 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T21:47:47,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:47:47,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33459-0x10030c1d4250001, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:47:47,780 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T21:47:47,780 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:47:47,780 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:47:47,780 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=22a1f13b90b0,33459,1731361667092}] 2024-11-11T21:47:47,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:47,933 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T21:47:47,935 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52563, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T21:47:47,939 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T21:47:47,939 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:47:47,941 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C33459%2C1731361667092.meta, suffix=.meta, logDir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/WALs/22a1f13b90b0,33459,1731361667092, archiveDir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/oldWALs, maxLogs=32 2024-11-11T21:47:47,941 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C33459%2C1731361667092.meta.1731361667941.meta 2024-11-11T21:47:47,950 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/WALs/22a1f13b90b0,33459,1731361667092/22a1f13b90b0%2C33459%2C1731361667092.meta.1731361667941.meta 2024-11-11T21:47:47,957 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43015:43015),(127.0.0.1/127.0.0.1:33915:33915)] 2024-11-11T21:47:47,958 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T21:47:47,959 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T21:47:47,959 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T21:47:47,959 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
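Once hbase:meta is open on the region server, its table descriptor (including the MultiRowMutationEndpoint coprocessor and the column families instantiated below) can be inspected from a client. A minimal sketch using the public Admin API, assuming the client Configuration points at this cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class InspectMeta {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml locates the cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
          for (ColumnFamilyDescriptor cf : meta.getColumnFamilies()) {
            // Expect info, ns, rep_barrier and table, matching the StoreOpener lines in this log.
            System.out.println(cf.getNameAsString() + " encoding=" + cf.getDataBlockEncoding());
          }
        }
      }
    }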
2024-11-11T21:47:47,959 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T21:47:47,959 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:47:47,959 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T21:47:47,959 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T21:47:47,961 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T21:47:47,962 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T21:47:47,962 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:47:47,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:47:47,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T21:47:47,963 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T21:47:47,963 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:47:47,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:47:47,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T21:47:47,965 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T21:47:47,965 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:47:47,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:47:47,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T21:47:47,966 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T21:47:47,966 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:47:47,967 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
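The per-family store attributes echoed above (ROW_INDEX_V1 block encoding, ROWCOL bloom filter, in-memory, 8 KB block size for most meta families) are the kind of settings expressed through ColumnFamilyDescriptorBuilder on the client side. A hedged sketch of building an equivalent family descriptor for an ordinary table:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilyDescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the attributes printed for the meta 'info' family in this log.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
            .build();
        System.out.println(info);
      }
    }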
2024-11-11T21:47:47,967 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T21:47:47,968 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740 2024-11-11T21:47:47,969 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740 2024-11-11T21:47:47,970 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T21:47:47,970 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T21:47:47,971 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T21:47:47,972 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T21:47:47,973 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=787417, jitterRate=0.0012530088424682617}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T21:47:47,973 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T21:47:47,974 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731361667959Writing region info on filesystem at 1731361667959Initializing all the Stores at 1731361667960 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361667960Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361667960Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361667960Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361667960Cleaning up temporary data from old regions at 1731361667970 (+10 ms)Running coprocessor post-open hooks at 1731361667973 (+3 ms)Region opened successfully at 1731361667974 (+1 ms) 2024-11-11T21:47:47,975 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731361667933 2024-11-11T21:47:47,977 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T21:47:47,977 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T21:47:47,978 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=22a1f13b90b0,33459,1731361667092 2024-11-11T21:47:47,980 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 22a1f13b90b0,33459,1731361667092, state=OPEN 2024-11-11T21:47:47,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T21:47:47,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33459-0x10030c1d4250001, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T21:47:47,982 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=22a1f13b90b0,33459,1731361667092 2024-11-11T21:47:47,982 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:47:47,982 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:47:47,984 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T21:47:47,985 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=22a1f13b90b0,33459,1731361667092 in 202 msec 2024-11-11T21:47:47,987 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T21:47:47,987 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 619 msec 2024-11-11T21:47:47,988 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:47:47,988 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T21:47:47,989 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T21:47:47,989 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=22a1f13b90b0,33459,1731361667092, seqNum=-1] 2024-11-11T21:47:47,989 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T21:47:47,991 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46907, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T21:47:47,997 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 711 msec 2024-11-11T21:47:47,997 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731361667997, completionTime=-1 2024-11-11T21:47:47,997 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T21:47:47,997 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-11T21:47:47,999 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T21:47:47,999 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731361727999 2024-11-11T21:47:47,999 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731361787999 2024-11-11T21:47:47,999 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-11T21:47:47,999 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45953,1731361667024-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:47:48,000 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45953,1731361667024-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:47:48,000 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45953,1731361667024-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:47:48,000 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-22a1f13b90b0:45953, period=300000, unit=MILLISECONDS is enabled. 
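InitMetaProcedure finishes by creating the built-in 'default' and 'hbase' namespaces. A small sketch of listing them from a client once the master has completed initialization (same connection assumptions as the earlier Admin example):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespaces {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName()); // expect "default" and "hbase"
          }
        }
      }
    }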
2024-11-11T21:47:48,000 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T21:47:48,000 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T21:47:48,002 DEBUG [master/22a1f13b90b0:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T21:47:48,004 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.874sec 2024-11-11T21:47:48,004 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T21:47:48,004 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T21:47:48,004 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T21:47:48,004 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T21:47:48,004 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T21:47:48,005 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45953,1731361667024-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T21:47:48,005 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45953,1731361667024-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T21:47:48,007 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T21:47:48,007 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T21:47:48,007 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45953,1731361667024-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
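Several optional subsystems report themselves disabled in this run (quotas, the hbase:slowlog table, the replication WAL event and sink trackers). As a hedged example, quota support is normally toggled with the hbase.quota.enabled property; the snippet assumes that key is honoured by the version under test and only shows setting it on a client Configuration before cluster start:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableQuotasSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // With the default (false), master and region servers log "Quota support disabled" as above.
        conf.setBoolean("hbase.quota.enabled", true);
        System.out.println("hbase.quota.enabled=" + conf.getBoolean("hbase.quota.enabled", false));
      }
    }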
2024-11-11T21:47:48,017 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@208716aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:47:48,017 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 22a1f13b90b0,45953,-1 for getting cluster id 2024-11-11T21:47:48,017 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T21:47:48,019 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '04ecebca-42e1-4eb8-87be-40056e2bdc0d' 2024-11-11T21:47:48,019 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T21:47:48,019 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "04ecebca-42e1-4eb8-87be-40056e2bdc0d" 2024-11-11T21:47:48,019 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@521e2234, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:47:48,019 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [22a1f13b90b0,45953,-1] 2024-11-11T21:47:48,020 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T21:47:48,020 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:47:48,021 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39400, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T21:47:48,022 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7181dda1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:47:48,022 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T21:47:48,023 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=22a1f13b90b0,33459,1731361667092, seqNum=-1] 2024-11-11T21:47:48,023 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T21:47:48,024 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48966, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T21:47:48,026 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=22a1f13b90b0,45953,1731361667024 2024-11-11T21:47:48,026 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:47:48,028 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T21:47:48,029 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T21:47:48,029 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 22a1f13b90b0,45953,1731361667024 2024-11-11T21:47:48,029 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@69624f08 2024-11-11T21:47:48,030 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T21:47:48,031 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39402, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T21:47:48,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45953 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-11T21:47:48,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45953 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-11T21:47:48,032 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45953 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T21:47:48,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45953 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-11T21:47:48,034 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T21:47:48,034 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:47:48,034 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45953 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-11T21:47:48,035 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T21:47:48,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45953 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T21:47:48,041 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741835_1011 (size=381) 2024-11-11T21:47:48,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741835_1011 (size=381) 2024-11-11T21:47:48,043 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 793e534008933f55fa324116a0af01cf, NAME => 'TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381 2024-11-11T21:47:48,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741836_1012 (size=64) 2024-11-11T21:47:48,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741836_1012 (size=64) 2024-11-11T21:47:48,051 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:47:48,051 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 793e534008933f55fa324116a0af01cf, disabling compactions & flushes 2024-11-11T21:47:48,051 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf. 2024-11-11T21:47:48,051 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf. 2024-11-11T21:47:48,051 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf. after waiting 0 ms 2024-11-11T21:47:48,051 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf. 2024-11-11T21:47:48,051 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf. 
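The table being created here, TestLogRolling-testLogRolling, deliberately uses a tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) so the test can force frequent flushes, rolls and splits, which is why TableDescriptorChecker warned about both values earlier. A client-side sketch of an equivalent create call; the two sizes are taken from the log, everything else is illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateLogRollingTable {
      public static void main(String[] args) throws Exception {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
            .setMaxFileSize(786432L)      // triggers the MAX_FILESIZE warning seen above
            .setMemStoreFlushSize(8192L)  // triggers the MEMSTORE_FLUSHSIZE warning seen above
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(td);
        }
      }
    }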
2024-11-11T21:47:48,051 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 793e534008933f55fa324116a0af01cf: Waiting for close lock at 1731361668051Disabling compacts and flushes for region at 1731361668051Disabling writes for close at 1731361668051Writing region close event to WAL at 1731361668051Closed at 1731361668051 2024-11-11T21:47:48,052 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T21:47:48,052 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731361668052"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731361668052"}]},"ts":"1731361668052"} 2024-11-11T21:47:48,055 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-11T21:47:48,055 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T21:47:48,056 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731361668055"}]},"ts":"1731361668055"} 2024-11-11T21:47:48,057 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-11T21:47:48,058 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=793e534008933f55fa324116a0af01cf, ASSIGN}] 2024-11-11T21:47:48,059 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=793e534008933f55fa324116a0af01cf, ASSIGN 2024-11-11T21:47:48,060 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=793e534008933f55fa324116a0af01cf, ASSIGN; state=OFFLINE, location=22a1f13b90b0,33459,1731361667092; forceNewPlan=false, retain=false 2024-11-11T21:47:48,210 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=793e534008933f55fa324116a0af01cf, regionState=OPENING, regionLocation=22a1f13b90b0,33459,1731361667092 2024-11-11T21:47:48,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=793e534008933f55fa324116a0af01cf, ASSIGN because future has completed 2024-11-11T21:47:48,213 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 793e534008933f55fa324116a0af01cf, 
server=22a1f13b90b0,33459,1731361667092}] 2024-11-11T21:47:48,371 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf. 2024-11-11T21:47:48,371 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 793e534008933f55fa324116a0af01cf, NAME => 'TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf.', STARTKEY => '', ENDKEY => ''} 2024-11-11T21:47:48,372 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 793e534008933f55fa324116a0af01cf 2024-11-11T21:47:48,372 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:47:48,372 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 793e534008933f55fa324116a0af01cf 2024-11-11T21:47:48,372 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 793e534008933f55fa324116a0af01cf 2024-11-11T21:47:48,376 INFO [StoreOpener-793e534008933f55fa324116a0af01cf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 793e534008933f55fa324116a0af01cf 2024-11-11T21:47:48,378 INFO [StoreOpener-793e534008933f55fa324116a0af01cf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 793e534008933f55fa324116a0af01cf columnFamilyName info 2024-11-11T21:47:48,378 DEBUG [StoreOpener-793e534008933f55fa324116a0af01cf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:47:48,379 INFO [StoreOpener-793e534008933f55fa324116a0af01cf-1 {}] regionserver.HStore(327): Store=793e534008933f55fa324116a0af01cf/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:47:48,379 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 793e534008933f55fa324116a0af01cf 2024-11-11T21:47:48,380 DEBUG 
[RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf 2024-11-11T21:47:48,380 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf 2024-11-11T21:47:48,380 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 793e534008933f55fa324116a0af01cf 2024-11-11T21:47:48,381 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 793e534008933f55fa324116a0af01cf 2024-11-11T21:47:48,382 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 793e534008933f55fa324116a0af01cf 2024-11-11T21:47:48,384 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T21:47:48,385 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 793e534008933f55fa324116a0af01cf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=737283, jitterRate=-0.06249627470970154}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T21:47:48,385 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 793e534008933f55fa324116a0af01cf 2024-11-11T21:47:48,386 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 793e534008933f55fa324116a0af01cf: Running coprocessor pre-open hook at 1731361668372Writing region info on filesystem at 1731361668372Initializing all the Stores at 1731361668373 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361668373Cleaning up temporary data from old regions at 1731361668381 (+8 ms)Running coprocessor post-open hooks at 1731361668385 (+4 ms)Region opened successfully at 1731361668386 (+1 ms) 2024-11-11T21:47:48,387 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf., pid=6, masterSystemTime=1731361668366 2024-11-11T21:47:48,390 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf. 2024-11-11T21:47:48,390 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf. 2024-11-11T21:47:48,391 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=793e534008933f55fa324116a0af01cf, regionState=OPEN, openSeqNum=2, regionLocation=22a1f13b90b0,33459,1731361667092 2024-11-11T21:47:48,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 793e534008933f55fa324116a0af01cf, server=22a1f13b90b0,33459,1731361667092 because future has completed 2024-11-11T21:47:48,399 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T21:47:48,399 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 793e534008933f55fa324116a0af01cf, server=22a1f13b90b0,33459,1731361667092 in 183 msec 2024-11-11T21:47:48,403 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T21:47:48,403 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=793e534008933f55fa324116a0af01cf, ASSIGN in 342 msec 2024-11-11T21:47:48,404 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T21:47:48,405 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731361668404"}]},"ts":"1731361668404"} 2024-11-11T21:47:48,407 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-11T21:47:48,409 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T21:47:48,411 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 377 msec 2024-11-11T21:47:48,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:48,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:48,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:49,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:49,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:49,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:50,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:50,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:50,719 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:50,719 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:50,719 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:50,719 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:50,719 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:50,719 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:50,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:50,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:50,740 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:50,740 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:50,740 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:50,740 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:50,741 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:50,741 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:50,744 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:50,745 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:50,745 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:50,747 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:50,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:51,252 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T21:47:51,253 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:51,253 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:51,253 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:51,253 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:51,254 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:51,254 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:51,255 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:51,255 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:51,280 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:51,280 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:51,280 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:51,280 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:51,280 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:51,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:51,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:51,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:51,284 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:51,287 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:51,463 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-11T21:47:51,463 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-11T21:47:51,464 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-11T21:47:51,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:51,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:51,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:52,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:52,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:52,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:53,352 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T21:47:53,352 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-11T21:47:53,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:53,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:53,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:54,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:54,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:54,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:55,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:55,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:55,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:56,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:47:56,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:56,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:56,967 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T21:47:56,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:56,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:56,969 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:56,969 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:56,969 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:56,970 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:56,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:56,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:56,996 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:56,997 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:56,997 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:56,997 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:56,997 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:56,997 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:57,000 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:57,001 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:57,001 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:57,003 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:47:57,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:57,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:57,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:58,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45953 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T21:47:58,122 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-11T21:47:58,122 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-11T21:47:58,125 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-11T21:47:58,125 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf. 
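Note on the repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above: the Close-WAL-Writer-0 thread keeps retrying lease recovery (AbstractFSWAL.closeWriter -> RecoverLeaseFSUtils.recoverFileLease) against a DFS client that has already been shut down, and because RecoverLeaseFSUtils calls isFileClosed reflectively, the underlying IOException surfaces wrapped in an InvocationTargetException once per retry. A minimal sketch of the underlying condition only (the NameNode URI and path below are placeholders, not the ones from this run, and this is not the HBase retry code itself):

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FilesystemClosedRepro {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder NameNode URI; in the log above this is the mini-cluster at hdfs://localhost:39361.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9000"), conf);
        Path wal = new Path("/user/jenkins/some-wal-file"); // placeholder path

        fs.close(); // once the client is closed, every later call fails fast

        // DistributedFileSystem.isFileClosed() first calls DFSClient.checkOpen(),
        // which throws IOException("Filesystem closed") -- the root cause shown
        // under each InvocationTargetException in the warnings above.
        ((DistributedFileSystem) fs).isFileClosed(wal);
    }
}
```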
2024-11-11T21:47:58,128 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf., hostname=22a1f13b90b0,33459,1731361667092, seqNum=2] 2024-11-11T21:47:58,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on 793e534008933f55fa324116a0af01cf 2024-11-11T21:47:58,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 793e534008933f55fa324116a0af01cf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T21:47:58,161 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/028f629d7c2c473481cef8a1d5f42226 is 1080, key is row0001/info:/1731361678130/Put/seqid=0 2024-11-11T21:47:58,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741837_1013 (size=12509) 2024-11-11T21:47:58,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741837_1013 (size=12509) 2024-11-11T21:47:58,172 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/028f629d7c2c473481cef8a1d5f42226 2024-11-11T21:47:58,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/028f629d7c2c473481cef8a1d5f42226 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/028f629d7c2c473481cef8a1d5f42226 2024-11-11T21:47:58,190 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/028f629d7c2c473481cef8a1d5f42226, entries=7, sequenceid=11, filesize=12.2 K 2024-11-11T21:47:58,191 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=21.02 KB/21520 for 793e534008933f55fa324116a0af01cf in 48ms, sequenceid=11, compaction requested=false 2024-11-11T21:47:58,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 793e534008933f55fa324116a0af01cf: 2024-11-11T21:47:58,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on 793e534008933f55fa324116a0af01cf 2024-11-11T21:47:58,191 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 793e534008933f55fa324116a0af01cf 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB 2024-11-11T21:47:58,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/b4f0c7049193415cbc1bcc0885437e29 is 1080, key is row0008/info:/1731361678145/Put/seqid=0 2024-11-11T21:47:58,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741838_1014 (size=27607) 2024-11-11T21:47:58,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741838_1014 (size=27607) 2024-11-11T21:47:58,211 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=35 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/b4f0c7049193415cbc1bcc0885437e29 2024-11-11T21:47:58,218 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/b4f0c7049193415cbc1bcc0885437e29 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/b4f0c7049193415cbc1bcc0885437e29 2024-11-11T21:47:58,223 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/b4f0c7049193415cbc1bcc0885437e29, entries=21, sequenceid=35, filesize=27.0 K 2024-11-11T21:47:58,225 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=4.20 KB/4304 for 793e534008933f55fa324116a0af01cf in 33ms, sequenceid=35, compaction requested=false 2024-11-11T21:47:58,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 793e534008933f55fa324116a0af01cf: 2024-11-11T21:47:58,225 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.2 K, sizeToCheck=16.0 K 2024-11-11T21:47:58,225 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:47:58,225 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/b4f0c7049193415cbc1bcc0885437e29 because midkey is the same as first or last row 2024-11-11T21:47:58,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:58,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:58,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:59,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:59,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:47:59,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:48:00,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on 793e534008933f55fa324116a0af01cf 2024-11-11T21:48:00,211 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 793e534008933f55fa324116a0af01cf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T21:48:00,217 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/9b0488e81ecd4df3a14d0022cf2a9249 is 1080, key is row0029/info:/1731361678193/Put/seqid=0 2024-11-11T21:48:00,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741839_1015 (size=12509) 2024-11-11T21:48:00,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741839_1015 (size=12509) 2024-11-11T21:48:00,223 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/9b0488e81ecd4df3a14d0022cf2a9249 2024-11-11T21:48:00,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/9b0488e81ecd4df3a14d0022cf2a9249 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/9b0488e81ecd4df3a14d0022cf2a9249 2024-11-11T21:48:00,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/9b0488e81ecd4df3a14d0022cf2a9249, entries=7, sequenceid=45, filesize=12.2 K 2024-11-11T21:48:00,239 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 793e534008933f55fa324116a0af01cf in 27ms, sequenceid=45, compaction requested=true 2024-11-11T21:48:00,239 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 793e534008933f55fa324116a0af01cf: 2024-11-11T21:48:00,240 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=51.4 K, sizeToCheck=16.0 K 2024-11-11T21:48:00,240 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:48:00,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on 793e534008933f55fa324116a0af01cf 2024-11-11T21:48:00,241 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/b4f0c7049193415cbc1bcc0885437e29 because midkey is the same as first or last row 2024-11-11T21:48:00,241 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 793e534008933f55fa324116a0af01cf:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T21:48:00,241 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:00,241 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 793e534008933f55fa324116a0af01cf 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-11T21:48:00,242 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T21:48:00,243 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 52625 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T21:48:00,244 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1541): 793e534008933f55fa324116a0af01cf/info is initiating minor compaction (all files) 2024-11-11T21:48:00,244 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 793e534008933f55fa324116a0af01cf/info in TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf. 2024-11-11T21:48:00,245 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/028f629d7c2c473481cef8a1d5f42226, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/b4f0c7049193415cbc1bcc0885437e29, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/9b0488e81ecd4df3a14d0022cf2a9249] into tmpdir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp, totalSize=51.4 K 2024-11-11T21:48:00,246 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 028f629d7c2c473481cef8a1d5f42226, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731361678130 2024-11-11T21:48:00,247 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting b4f0c7049193415cbc1bcc0885437e29, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=35, earliestPutTs=1731361678145 2024-11-11T21:48:00,248 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9b0488e81ecd4df3a14d0022cf2a9249, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731361678193 2024-11-11T21:48:00,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/ae9a52ac520445b9bf84124239f6f9d2 is 1080, key is row0036/info:/1731361680212/Put/seqid=0 
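The flush and split-policy lines above can be reproduced arithmetically. Below is a minimal standalone sketch, assuming the commonly documented IncreasingToUpperBoundRegionSplitPolicy formula (sizeToCheck = min(maxFileSize, 2 * flushSize * regionsWithCommonTable^3)) and the StoreUtils rule that a store file cannot supply a split point when its midkey equals its first or last row. The constants come from the log (sumSize ~51.4 K, sizeToCheck 16.0 K, regionsWithCommonTable=1); the flush size, max file size and row keys are assumptions for illustration, not values taken from the test.

    // SplitSizeSketch.java -- standalone arithmetic mirroring the split-policy lines above; not HBase code.
    public class SplitSizeSketch {

        // Assumed IncreasingToUpperBoundRegionSplitPolicy formula:
        // sizeToCheck = min(maxFileSize, 2 * flushSize * regionsWithCommonTable^3).
        static long sizeToCheck(long maxFileSize, long flushSize, int regionsWithCommonTable) {
            if (regionsWithCommonTable == 0) {
                return maxFileSize;
            }
            long growing = 2L * flushSize * regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable;
            return Math.min(maxFileSize, growing);
        }

        public static void main(String[] args) {
            long maxFileSize = 10L * 1024 * 1024 * 1024; // assumed default hbase.hregion.max.filesize (10 GB)
            long flushSize = 8 * 1024;                   // assumption chosen so the formula yields the logged 16.0 K
            long check = sizeToCheck(maxFileSize, flushSize, 1); // regionsWithCommonTable=1 as logged
            long sumSize = 52_625;                       // total size of the 3 selected store files (~51.4 K), as logged

            System.out.println("sizeToCheck = " + check + " bytes");
            System.out.println("Should split because region size is big enough: " + (sumSize > check));

            // Even when the size check passes, the split is rejected when the midkey of the largest
            // store file equals its first or last row; hypothetical keys just to exercise the guard:
            String firstRow = "row0001", lastRow = "row0035", midkey = "row0001";
            boolean canSplitHere = !midkey.equals(firstRow) && !midkey.equals(lastRow);
            System.out.println("cannot split because midkey is same as first or last row: " + !canSplitHere);
        }
    }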
2024-11-11T21:48:00,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741840_1016 (size=17894) 2024-11-11T21:48:00,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741840_1016 (size=17894) 2024-11-11T21:48:00,291 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/ae9a52ac520445b9bf84124239f6f9d2 2024-11-11T21:48:00,300 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 793e534008933f55fa324116a0af01cf#info#compaction#58 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T21:48:00,300 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/4e6c0310504b4e46b754f94c2311d00f is 1080, key is row0001/info:/1731361678130/Put/seqid=0 2024-11-11T21:48:00,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/ae9a52ac520445b9bf84124239f6f9d2 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/ae9a52ac520445b9bf84124239f6f9d2 2024-11-11T21:48:00,315 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/ae9a52ac520445b9bf84124239f6f9d2, entries=12, sequenceid=60, filesize=17.5 K 2024-11-11T21:48:00,316 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=17.86 KB/18292 for 793e534008933f55fa324116a0af01cf in 75ms, sequenceid=60, compaction requested=false 2024-11-11T21:48:00,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 793e534008933f55fa324116a0af01cf: 2024-11-11T21:48:00,317 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=68.9 K, sizeToCheck=16.0 K 2024-11-11T21:48:00,317 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:48:00,317 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/b4f0c7049193415cbc1bcc0885437e29 because midkey is the same as first or last row 2024-11-11T21:48:00,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741841_1017 (size=42824) 2024-11-11T21:48:00,319 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741841_1017 (size=42824) 2024-11-11T21:48:00,328 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/4e6c0310504b4e46b754f94c2311d00f as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/4e6c0310504b4e46b754f94c2311d00f 2024-11-11T21:48:00,336 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 793e534008933f55fa324116a0af01cf/info of 793e534008933f55fa324116a0af01cf into 4e6c0310504b4e46b754f94c2311d00f(size=41.8 K), total size for store is 59.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T21:48:00,336 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 793e534008933f55fa324116a0af01cf: 2024-11-11T21:48:00,336 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf., storeName=793e534008933f55fa324116a0af01cf/info, priority=13, startTime=1731361680241; duration=0sec 2024-11-11T21:48:00,336 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-11-11T21:48:00,336 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:48:00,336 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/4e6c0310504b4e46b754f94c2311d00f because midkey is the same as first or last row 2024-11-11T21:48:00,336 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-11-11T21:48:00,337 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:48:00,337 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/4e6c0310504b4e46b754f94c2311d00f because midkey is the same as first or last row 2024-11-11T21:48:00,337 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-11-11T21:48:00,337 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:48:00,337 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/4e6c0310504b4e46b754f94c2311d00f because midkey is the same as first or last row 2024-11-11T21:48:00,337 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:00,337 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 793e534008933f55fa324116a0af01cf:info 2024-11-11T21:48:00,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:00,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:00,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:01,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:01,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:01,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:02,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on 793e534008933f55fa324116a0af01cf 2024-11-11T21:48:02,293 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 793e534008933f55fa324116a0af01cf 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-11-11T21:48:02,301 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/00e55312ae8541689e893d165c33b8cd is 1080, key is row0048/info:/1731361680242/Put/seqid=0 2024-11-11T21:48:02,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741842_1018 (size=24376) 2024-11-11T21:48:02,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741842_1018 (size=24376) 2024-11-11T21:48:02,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/00e55312ae8541689e893d165c33b8cd 2024-11-11T21:48:02,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/00e55312ae8541689e893d165c33b8cd as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/00e55312ae8541689e893d165c33b8cd 2024-11-11T21:48:02,319 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/00e55312ae8541689e893d165c33b8cd, entries=18, sequenceid=82, filesize=23.8 K 2024-11-11T21:48:02,320 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=10.51 KB/10760 for 793e534008933f55fa324116a0af01cf in 27ms, sequenceid=82, compaction requested=true 2024-11-11T21:48:02,320 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 793e534008933f55fa324116a0af01cf: 2024-11-11T21:48:02,320 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-11T21:48:02,320 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:48:02,320 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/4e6c0310504b4e46b754f94c2311d00f because midkey is the same as first or last row 2024-11-11T21:48:02,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 793e534008933f55fa324116a0af01cf:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T21:48:02,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:02,320 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T21:48:02,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on 793e534008933f55fa324116a0af01cf 2024-11-11T21:48:02,320 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 793e534008933f55fa324116a0af01cf 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-11T21:48:02,321 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T21:48:02,321 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.HStore(1541): 793e534008933f55fa324116a0af01cf/info is initiating minor compaction (all files) 2024-11-11T21:48:02,322 INFO [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 793e534008933f55fa324116a0af01cf/info in TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf. 
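The selection logged just above ("selected 3 files of size 85094 ... with 1 in ratio") reflects the exploring policy's ratio rule: every file in a candidate selection must be no larger than the sum of the other selected files times the compaction ratio. A minimal sketch of that check follows, using the three file sizes reported in this log (42824, 17894 and 24376 bytes, sum 85094); the ratio value (1.2) and the helper method are assumptions for illustration, not the HBase implementation.

    import java.util.List;

    // FilesInRatioSketch.java -- illustrative check mirroring the compaction selection logged above.
    public class FilesInRatioSketch {

        // A selection is "in ratio" if each file is <= ratio * (sum of the other files in the selection).
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Sizes of 4e6c0310..., ae9a52ac... and 00e55312... as logged (total = 85094).
            List<Long> selection = List.of(42_824L, 17_894L, 24_376L);
            double assumedRatio = 1.2; // assumed default compaction ratio
            System.out.println("total size = " + selection.stream().mapToLong(Long::longValue).sum());
            System.out.println("in ratio   = " + filesInRatio(selection, assumedRatio));
        }
    }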
2024-11-11T21:48:02,322 INFO [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/4e6c0310504b4e46b754f94c2311d00f, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/ae9a52ac520445b9bf84124239f6f9d2, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/00e55312ae8541689e893d165c33b8cd] into tmpdir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp, totalSize=83.1 K 2024-11-11T21:48:02,322 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] compactions.Compactor(225): Compacting 4e6c0310504b4e46b754f94c2311d00f, keycount=35, bloomtype=ROW, size=41.8 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731361678130 2024-11-11T21:48:02,322 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] compactions.Compactor(225): Compacting ae9a52ac520445b9bf84124239f6f9d2, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1731361680212 2024-11-11T21:48:02,323 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] compactions.Compactor(225): Compacting 00e55312ae8541689e893d165c33b8cd, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731361680242 2024-11-11T21:48:02,325 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/756a858acbd0426781d9a28cc21a5cef is 1080, key is row0066/info:/1731361682295/Put/seqid=0 2024-11-11T21:48:02,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741843_1019 (size=16817) 2024-11-11T21:48:02,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741843_1019 (size=16817) 2024-11-11T21:48:02,331 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/756a858acbd0426781d9a28cc21a5cef 2024-11-11T21:48:02,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/756a858acbd0426781d9a28cc21a5cef as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/756a858acbd0426781d9a28cc21a5cef 2024-11-11T21:48:02,339 INFO [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 793e534008933f55fa324116a0af01cf#info#compaction#61 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 
ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T21:48:02,339 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/e2c37e3d2455400c8c5701f98d8429ba is 1080, key is row0001/info:/1731361678130/Put/seqid=0 2024-11-11T21:48:02,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741844_1020 (size=75378) 2024-11-11T21:48:02,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741844_1020 (size=75378) 2024-11-11T21:48:02,346 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/756a858acbd0426781d9a28cc21a5cef, entries=11, sequenceid=96, filesize=16.4 K 2024-11-11T21:48:02,347 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for 793e534008933f55fa324116a0af01cf in 27ms, sequenceid=96, compaction requested=false 2024-11-11T21:48:02,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 793e534008933f55fa324116a0af01cf: 2024-11-11T21:48:02,348 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=99.5 K, sizeToCheck=16.0 K 2024-11-11T21:48:02,348 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:48:02,348 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/4e6c0310504b4e46b754f94c2311d00f because midkey is the same as first or last row 2024-11-11T21:48:02,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on 793e534008933f55fa324116a0af01cf 2024-11-11T21:48:02,349 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 793e534008933f55fa324116a0af01cf 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-11T21:48:02,352 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/e2c37e3d2455400c8c5701f98d8429ba as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/e2c37e3d2455400c8c5701f98d8429ba 2024-11-11T21:48:02,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/cfb2330cc30543f59e1c423942627c96 is 1080, key is row0077/info:/1731361682322/Put/seqid=0 2024-11-11T21:48:02,358 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741845_1021 (size=16817) 2024-11-11T21:48:02,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741845_1021 (size=16817) 2024-11-11T21:48:02,360 INFO [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 793e534008933f55fa324116a0af01cf/info of 793e534008933f55fa324116a0af01cf into e2c37e3d2455400c8c5701f98d8429ba(size=73.6 K), total size for store is 90.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T21:48:02,360 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 793e534008933f55fa324116a0af01cf: 2024-11-11T21:48:02,360 INFO [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf., storeName=793e534008933f55fa324116a0af01cf/info, priority=13, startTime=1731361682320; duration=0sec 2024-11-11T21:48:02,361 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=90.0 K, sizeToCheck=16.0 K 2024-11-11T21:48:02,361 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:48:02,361 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=110 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/cfb2330cc30543f59e1c423942627c96 2024-11-11T21:48:02,361 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=90.0 K, sizeToCheck=16.0 K 2024-11-11T21:48:02,361 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:48:02,361 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=90.0 K, sizeToCheck=16.0 K 2024-11-11T21:48:02,361 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:48:02,362 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:02,362 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:02,363 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 793e534008933f55fa324116a0af01cf:info 2024-11-11T21:48:02,364 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45953 {}] assignment.AssignmentManager(1363): Split request from 22a1f13b90b0,33459,1731361667092, parent={ENCODED => 793e534008933f55fa324116a0af01cf, 
NAME => 'TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-11T21:48:02,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/cfb2330cc30543f59e1c423942627c96 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/cfb2330cc30543f59e1c423942627c96 2024-11-11T21:48:02,371 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45953 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=22a1f13b90b0,33459,1731361667092 2024-11-11T21:48:02,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/cfb2330cc30543f59e1c423942627c96, entries=11, sequenceid=110, filesize=16.4 K 2024-11-11T21:48:02,375 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for 793e534008933f55fa324116a0af01cf in 27ms, sequenceid=110, compaction requested=true 2024-11-11T21:48:02,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 793e534008933f55fa324116a0af01cf: 2024-11-11T21:48:02,375 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=106.5 K, sizeToCheck=16.0 K 2024-11-11T21:48:02,375 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:48:02,375 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=106.5 K, sizeToCheck=16.0 K 2024-11-11T21:48:02,375 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:48:02,376 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=106.5 K, sizeToCheck=16.0 K 2024-11-11T21:48:02,376 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-11T21:48:02,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=1 2024-11-11T21:48:02,375 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45953 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=793e534008933f55fa324116a0af01cf, daughterA=eab9b197412b79d6a3d1daed5218ca45, daughterB=a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:02,376 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=793e534008933f55fa324116a0af01cf, daughterA=eab9b197412b79d6a3d1daed5218ca45, 
daughterB=a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:02,376 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=793e534008933f55fa324116a0af01cf, daughterA=eab9b197412b79d6a3d1daed5218ca45, daughterB=a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:02,377 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=793e534008933f55fa324116a0af01cf, daughterA=eab9b197412b79d6a3d1daed5218ca45, daughterB=a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:02,378 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45953 {}] assignment.AssignmentManager(1363): Split request from 22a1f13b90b0,33459,1731361667092, parent={ENCODED => 793e534008933f55fa324116a0af01cf, NAME => 'TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-11T21:48:02,379 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45953 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=SPLITTING, location=22a1f13b90b0,33459,1731361667092 2024-11-11T21:48:02,379 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45953 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=793e534008933f55fa324116a0af01cf, daughterA=23de6ba4b50527b70cbd16e029eefa7a, daughterB=f62bdc70658dd0fd41b37f2e5c6593ab 2024-11-11T21:48:02,380 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(842): Waiting on xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=793e534008933f55fa324116a0af01cf, daughterA=23de6ba4b50527b70cbd16e029eefa7a, daughterB=f62bdc70658dd0fd41b37f2e5c6593ab held by pid=7 2024-11-11T21:48:02,383 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=793e534008933f55fa324116a0af01cf, UNASSIGN}] 2024-11-11T21:48:02,386 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(162): LOCK_EVENT_WAIT SchemaLocking[serverLocks={},namespaceLocks={hbase=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},tableLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},regionLocks={},peerLocks={},metaLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},globalLocks={}] 2024-11-11T21:48:02,386 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=793e534008933f55fa324116a0af01cf, UNASSIGN 2024-11-11T21:48:02,386 DEBUG [PEWorker-4 {}] procedure2.ProcedureExecutor(1511): LOCK_EVENT_WAIT pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=793e534008933f55fa324116a0af01cf, daughterA=23de6ba4b50527b70cbd16e029eefa7a, daughterB=f62bdc70658dd0fd41b37f2e5c6593ab 
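The split above is initiated server-side: CompactSplit asks the master to split 793e534008933f55fa324116a0af01cf at row0062 once the store passes the size check, and the master runs it as a SplitTableRegionProcedure with an UNASSIGN subprocedure. For reference, an equivalent split can also be requested from a client through the public Admin API; a minimal sketch, assuming a reachable cluster configuration on the classpath and using the table name and split key from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    // ManualSplitSketch.java -- client-side equivalent of the server-initiated split above; illustrative only.
    public class ManualSplitSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml is available
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Request a split of the test table at the same key the region server proposed in this log.
                admin.split(TableName.valueOf("TestLogRolling-testLogRolling"), Bytes.toBytes("row0062"));
            }
        }
    }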
2024-11-11T21:48:02,388 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=793e534008933f55fa324116a0af01cf, regionState=CLOSING, regionLocation=22a1f13b90b0,33459,1731361667092 2024-11-11T21:48:02,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=793e534008933f55fa324116a0af01cf, UNASSIGN because future has completed 2024-11-11T21:48:02,391 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-11T21:48:02,391 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure 793e534008933f55fa324116a0af01cf, server=22a1f13b90b0,33459,1731361667092}] 2024-11-11T21:48:02,549 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(122): Close 793e534008933f55fa324116a0af01cf 2024-11-11T21:48:02,549 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-11T21:48:02,550 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1722): Closing 793e534008933f55fa324116a0af01cf, disabling compactions & flushes 2024-11-11T21:48:02,550 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf. 2024-11-11T21:48:02,550 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf. 2024-11-11T21:48:02,550 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf. after waiting 0 ms 2024-11-11T21:48:02,550 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf. 
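The close sequence above (time-limited wait for the close lock, then "Updates disabled for region") follows the general pattern of taking an exclusive lock with a deadline so in-flight writes drain before the final flush. A small generic sketch of that pattern using java.util.concurrent; the lock object, timeout and method names are illustrative and not the HRegion internals:

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // TimedCloseLockSketch.java -- generic "time limited wait for close lock" pattern; illustrative only.
    public class TimedCloseLockSketch {

        private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();

        // Writers take the read side; close takes the write side so it excludes all writers.
        void doWrite(Runnable mutation) {
            closeLock.readLock().lock();
            try {
                mutation.run();
            } finally {
                closeLock.readLock().unlock();
            }
        }

        boolean close(long timeoutMillis) throws InterruptedException {
            long start = System.currentTimeMillis();
            // Time-limited wait for the close lock.
            if (!closeLock.writeLock().tryLock(timeoutMillis, TimeUnit.MILLISECONDS)) {
                return false; // could not quiesce writers in time
            }
            try {
                long waited = System.currentTimeMillis() - start;
                System.out.println("Acquired close lock after waiting " + waited + " ms");
                // Updates are now disabled: flush remaining data and release resources here.
                return true;
            } finally {
                closeLock.writeLock().unlock();
            }
        }

        public static void main(String[] args) throws InterruptedException {
            TimedCloseLockSketch region = new TimedCloseLockSketch();
            region.doWrite(() -> System.out.println("applying a put"));
            System.out.println("closed = " + region.close(1000));
        }
    }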
2024-11-11T21:48:02,550 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(2902): Flushing 793e534008933f55fa324116a0af01cf 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-11T21:48:02,554 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/7deb6f518b2d4c14a9806721768027af is 1080, key is row0088/info:/1731361682350/Put/seqid=0 2024-11-11T21:48:02,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741846_1022 (size=14663) 2024-11-11T21:48:02,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741846_1022 (size=14663) 2024-11-11T21:48:02,559 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/7deb6f518b2d4c14a9806721768027af 2024-11-11T21:48:02,564 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/.tmp/info/7deb6f518b2d4c14a9806721768027af as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/7deb6f518b2d4c14a9806721768027af 2024-11-11T21:48:02,569 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/7deb6f518b2d4c14a9806721768027af, entries=9, sequenceid=123, filesize=14.3 K 2024-11-11T21:48:02,570 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 793e534008933f55fa324116a0af01cf in 20ms, sequenceid=123, compaction requested=true 2024-11-11T21:48:02,571 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/028f629d7c2c473481cef8a1d5f42226, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/b4f0c7049193415cbc1bcc0885437e29, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/4e6c0310504b4e46b754f94c2311d00f, 
hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/9b0488e81ecd4df3a14d0022cf2a9249, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/ae9a52ac520445b9bf84124239f6f9d2, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/00e55312ae8541689e893d165c33b8cd] to archive 2024-11-11T21:48:02,572 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-11T21:48:02,574 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/028f629d7c2c473481cef8a1d5f42226 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/028f629d7c2c473481cef8a1d5f42226 2024-11-11T21:48:02,575 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/b4f0c7049193415cbc1bcc0885437e29 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/b4f0c7049193415cbc1bcc0885437e29 2024-11-11T21:48:02,577 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/4e6c0310504b4e46b754f94c2311d00f to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/4e6c0310504b4e46b754f94c2311d00f 2024-11-11T21:48:02,578 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/9b0488e81ecd4df3a14d0022cf2a9249 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/9b0488e81ecd4df3a14d0022cf2a9249 2024-11-11T21:48:02,579 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/ae9a52ac520445b9bf84124239f6f9d2 to 
hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/ae9a52ac520445b9bf84124239f6f9d2 2024-11-11T21:48:02,580 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/00e55312ae8541689e893d165c33b8cd to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/00e55312ae8541689e893d165c33b8cd 2024-11-11T21:48:02,586 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-11-11T21:48:02,586 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf. 2024-11-11T21:48:02,586 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1676): Region close journal for 793e534008933f55fa324116a0af01cf: Waiting for close lock at 1731361682550Running coprocessor pre-close hooks at 1731361682550Disabling compacts and flushes for region at 1731361682550Disabling writes for close at 1731361682550Obtaining lock to block concurrent updates at 1731361682550Preparing flush snapshotting stores in 793e534008933f55fa324116a0af01cf at 1731361682550Finished memstore snapshotting TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf., syncing WAL and waiting on mvcc, flushsize=dataSize=9684, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1731361682550Flushing stores of TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf. 
at 1731361682551 (+1 ms)Flushing 793e534008933f55fa324116a0af01cf/info: creating writer at 1731361682551Flushing 793e534008933f55fa324116a0af01cf/info: appending metadata at 1731361682553 (+2 ms)Flushing 793e534008933f55fa324116a0af01cf/info: closing flushed file at 1731361682553Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b81ed81: reopening flushed file at 1731361682563 (+10 ms)Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 793e534008933f55fa324116a0af01cf in 20ms, sequenceid=123, compaction requested=true at 1731361682570 (+7 ms)Writing region close event to WAL at 1731361682582 (+12 ms)Running coprocessor post-close hooks at 1731361682586 (+4 ms)Closed at 1731361682586 2024-11-11T21:48:02,588 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(157): Closed 793e534008933f55fa324116a0af01cf 2024-11-11T21:48:02,589 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=793e534008933f55fa324116a0af01cf, regionState=CLOSED 2024-11-11T21:48:02,591 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure 793e534008933f55fa324116a0af01cf, server=22a1f13b90b0,33459,1731361667092 because future has completed 2024-11-11T21:48:02,594 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-11T21:48:02,594 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; CloseRegionProcedure 793e534008933f55fa324116a0af01cf, server=22a1f13b90b0,33459,1731361667092 in 201 msec 2024-11-11T21:48:02,596 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-11-11T21:48:02,596 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=793e534008933f55fa324116a0af01cf, UNASSIGN in 211 msec 2024-11-11T21:48:02,604 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:48:02,608 INFO [PEWorker-1 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=793e534008933f55fa324116a0af01cf, threads=4 2024-11-11T21:48:02,610 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/e2c37e3d2455400c8c5701f98d8429ba for region: 793e534008933f55fa324116a0af01cf 2024-11-11T21:48:02,610 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/756a858acbd0426781d9a28cc21a5cef for region: 793e534008933f55fa324116a0af01cf 2024-11-11T21:48:02,610 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/7deb6f518b2d4c14a9806721768027af for region: 793e534008933f55fa324116a0af01cf 2024-11-11T21:48:02,610 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/cfb2330cc30543f59e1c423942627c96 for region: 793e534008933f55fa324116a0af01cf 2024-11-11T21:48:02,621 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/cfb2330cc30543f59e1c423942627c96, top=true 2024-11-11T21:48:02,621 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/756a858acbd0426781d9a28cc21a5cef, top=true 2024-11-11T21:48:02,622 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/7deb6f518b2d4c14a9806721768027af, top=true 2024-11-11T21:48:02,630 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-cfb2330cc30543f59e1c423942627c96 for child: a7500af498a2592feb5bc54d0ae1aea4, parent: 793e534008933f55fa324116a0af01cf 2024-11-11T21:48:02,630 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-7deb6f518b2d4c14a9806721768027af for child: a7500af498a2592feb5bc54d0ae1aea4, parent: 793e534008933f55fa324116a0af01cf 2024-11-11T21:48:02,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741847_1023 (size=27) 2024-11-11T21:48:02,630 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-756a858acbd0426781d9a28cc21a5cef for child: a7500af498a2592feb5bc54d0ae1aea4, parent: 793e534008933f55fa324116a0af01cf 2024-11-11T21:48:02,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741847_1023 (size=27) 2024-11-11T21:48:02,630 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/756a858acbd0426781d9a28cc21a5cef for region: 793e534008933f55fa324116a0af01cf 2024-11-11T21:48:02,630 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/7deb6f518b2d4c14a9806721768027af for region: 793e534008933f55fa324116a0af01cf 2024-11-11T21:48:02,630 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/cfb2330cc30543f59e1c423942627c96 for region: 793e534008933f55fa324116a0af01cf 2024-11-11T21:48:02,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741848_1024 (size=27) 2024-11-11T21:48:02,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741848_1024 (size=27) 2024-11-11T21:48:02,645 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/e2c37e3d2455400c8c5701f98d8429ba for region: 793e534008933f55fa324116a0af01cf 2024-11-11T21:48:02,647 DEBUG [PEWorker-1 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 793e534008933f55fa324116a0af01cf Daughter A: [hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/eab9b197412b79d6a3d1daed5218ca45/info/e2c37e3d2455400c8c5701f98d8429ba.793e534008933f55fa324116a0af01cf] storefiles, Daughter B: [hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-756a858acbd0426781d9a28cc21a5cef, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-7deb6f518b2d4c14a9806721768027af, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-cfb2330cc30543f59e1c423942627c96, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/e2c37e3d2455400c8c5701f98d8429ba.793e534008933f55fa324116a0af01cf] storefiles. 
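[Editor's sketch, not part of the captured log] The entries above record SplitTableRegionProcedure (pid=7) splitting region 793e534008933f55fa324116a0af01cf into daughters eab9b197412b79d6a3d1daed5218ca45 and a7500af498a2592feb5bc54d0ae1aea4 at key row0062. For orientation only, the following is a minimal, assumed Java sketch of how such a split is normally requested through the public Admin API; the table name and split key are taken from the log, the class name and connection setup are invented for illustration, and this is not a claim about how the test itself triggered the split.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Ask the master to split the table's region at row0062; the master then runs
      // SplitTableRegionProcedure, writes reference/HFileLink files for the two
      // daughter regions and re-assigns them, as the surrounding entries record.
      admin.split(TableName.valueOf("TestLogRolling-testLogRolling"), Bytes.toBytes("row0062"));
    }
  }
}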
2024-11-11T21:48:02,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741849_1025 (size=71) 2024-11-11T21:48:02,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741849_1025 (size=71) 2024-11-11T21:48:02,656 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:48:02,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741850_1026 (size=71) 2024-11-11T21:48:02,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741850_1026 (size=71) 2024-11-11T21:48:02,670 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:48:02,681 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/eab9b197412b79d6a3d1daed5218ca45/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-11T21:48:02,683 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-11T21:48:02,685 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731361682685"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731361682685"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731361682685"}]},"ts":"1731361682685"} 2024-11-11T21:48:02,685 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731361682685"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731361682685"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731361682685"}]},"ts":"1731361682685"} 2024-11-11T21:48:02,686 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731361682685"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731361682685"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731361682685"}]},"ts":"1731361682685"} 2024-11-11T21:48:02,702 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=eab9b197412b79d6a3d1daed5218ca45, ASSIGN}, {pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=a7500af498a2592feb5bc54d0ae1aea4, ASSIGN}] 2024-11-11T21:48:02,704 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=eab9b197412b79d6a3d1daed5218ca45, ASSIGN 2024-11-11T21:48:02,704 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a7500af498a2592feb5bc54d0ae1aea4, ASSIGN 2024-11-11T21:48:02,705 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=eab9b197412b79d6a3d1daed5218ca45, ASSIGN; state=SPLITTING_NEW, location=22a1f13b90b0,33459,1731361667092; forceNewPlan=false, retain=false 2024-11-11T21:48:02,705 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a7500af498a2592feb5bc54d0ae1aea4, ASSIGN; state=SPLITTING_NEW, location=22a1f13b90b0,33459,1731361667092; forceNewPlan=false, retain=false 2024-11-11T21:48:02,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:02,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:48:02,856 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=a7500af498a2592feb5bc54d0ae1aea4, regionState=OPENING, regionLocation=22a1f13b90b0,33459,1731361667092 2024-11-11T21:48:02,856 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=eab9b197412b79d6a3d1daed5218ca45, regionState=OPENING, regionLocation=22a1f13b90b0,33459,1731361667092 2024-11-11T21:48:02,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=eab9b197412b79d6a3d1daed5218ca45, ASSIGN because future has completed 2024-11-11T21:48:02,859 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure eab9b197412b79d6a3d1daed5218ca45, server=22a1f13b90b0,33459,1731361667092}] 2024-11-11T21:48:02,860 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a7500af498a2592feb5bc54d0ae1aea4, ASSIGN because future has completed 2024-11-11T21:48:02,861 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure a7500af498a2592feb5bc54d0ae1aea4, server=22a1f13b90b0,33459,1731361667092}] 2024-11-11T21:48:02,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:03,022 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4. 2024-11-11T21:48:03,022 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7752): Opening region: {ENCODED => a7500af498a2592feb5bc54d0ae1aea4, NAME => 'TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-11T21:48:03,022 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:03,023 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:48:03,023 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7794): checking encryption for a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:03,023 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7797): checking classloading for a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:03,024 INFO [StoreOpener-a7500af498a2592feb5bc54d0ae1aea4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:03,025 INFO [StoreOpener-a7500af498a2592feb5bc54d0ae1aea4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a7500af498a2592feb5bc54d0ae1aea4 columnFamilyName info 2024-11-11T21:48:03,025 DEBUG [StoreOpener-a7500af498a2592feb5bc54d0ae1aea4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:48:03,036 DEBUG 
[StoreOpener-a7500af498a2592feb5bc54d0ae1aea4-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-756a858acbd0426781d9a28cc21a5cef 2024-11-11T21:48:03,041 DEBUG [StoreOpener-a7500af498a2592feb5bc54d0ae1aea4-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-7deb6f518b2d4c14a9806721768027af 2024-11-11T21:48:03,044 DEBUG [StoreOpener-a7500af498a2592feb5bc54d0ae1aea4-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-cfb2330cc30543f59e1c423942627c96 2024-11-11T21:48:03,051 DEBUG [StoreOpener-a7500af498a2592feb5bc54d0ae1aea4-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/e2c37e3d2455400c8c5701f98d8429ba.793e534008933f55fa324116a0af01cf->hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/e2c37e3d2455400c8c5701f98d8429ba-top 2024-11-11T21:48:03,051 INFO [StoreOpener-a7500af498a2592feb5bc54d0ae1aea4-1 {}] regionserver.HStore(327): Store=a7500af498a2592feb5bc54d0ae1aea4/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:48:03,052 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1038): replaying wal for a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:03,053 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:03,054 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:03,054 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1048): stopping wal replay for a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:03,054 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1060): Cleaning up temporary data for a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:03,056 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1093): writing seq id for a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:03,057 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] 
regionserver.HRegion(1114): Opened a7500af498a2592feb5bc54d0ae1aea4; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=804139, jitterRate=0.02251593768596649}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T21:48:03,057 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:03,058 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1006): Region open journal for a7500af498a2592feb5bc54d0ae1aea4: Running coprocessor pre-open hook at 1731361683023Writing region info on filesystem at 1731361683023Initializing all the Stores at 1731361683024 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361683024Cleaning up temporary data from old regions at 1731361683054 (+30 ms)Running coprocessor post-open hooks at 1731361683057 (+3 ms)Region opened successfully at 1731361683058 (+1 ms) 2024-11-11T21:48:03,059 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4., pid=14, masterSystemTime=1731361683012 2024-11-11T21:48:03,059 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(403): Add compact mark for store a7500af498a2592feb5bc54d0ae1aea4:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T21:48:03,059 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T21:48:03,059 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:03,060 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4. 2024-11-11T21:48:03,060 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1541): a7500af498a2592feb5bc54d0ae1aea4/info is initiating minor compaction (all files) 2024-11-11T21:48:03,061 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a7500af498a2592feb5bc54d0ae1aea4/info in TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4. 
2024-11-11T21:48:03,061 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/e2c37e3d2455400c8c5701f98d8429ba.793e534008933f55fa324116a0af01cf->hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/e2c37e3d2455400c8c5701f98d8429ba-top, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-756a858acbd0426781d9a28cc21a5cef, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-cfb2330cc30543f59e1c423942627c96, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-7deb6f518b2d4c14a9806721768027af] into tmpdir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp, totalSize=120.8 K 2024-11-11T21:48:03,061 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4. 2024-11-11T21:48:03,061 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4. 2024-11-11T21:48:03,061 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting e2c37e3d2455400c8c5701f98d8429ba.793e534008933f55fa324116a0af01cf, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1731361678130 2024-11-11T21:48:03,061 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45. 
2024-11-11T21:48:03,062 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => eab9b197412b79d6a3d1daed5218ca45, NAME => 'TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-11T21:48:03,062 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-756a858acbd0426781d9a28cc21a5cef, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1731361682295 2024-11-11T21:48:03,062 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=a7500af498a2592feb5bc54d0ae1aea4, regionState=OPEN, openSeqNum=127, regionLocation=22a1f13b90b0,33459,1731361667092 2024-11-11T21:48:03,062 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling eab9b197412b79d6a3d1daed5218ca45 2024-11-11T21:48:03,062 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:48:03,062 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for eab9b197412b79d6a3d1daed5218ca45 2024-11-11T21:48:03,062 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for eab9b197412b79d6a3d1daed5218ca45 2024-11-11T21:48:03,062 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-cfb2330cc30543f59e1c423942627c96, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1731361682322 2024-11-11T21:48:03,063 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-7deb6f518b2d4c14a9806721768027af, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731361682350 2024-11-11T21:48:03,063 INFO [StoreOpener-eab9b197412b79d6a3d1daed5218ca45-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region eab9b197412b79d6a3d1daed5218ca45 2024-11-11T21:48:03,064 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-11T21:48:03,064 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
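[Editor's sketch, not part of the captured log] The MemStoreFlusher entries here show a flush of all four column families of hbase:meta (1588230740) being selected by FlushAllLargeStoresPolicy. As context, a minimal, assumed Java sketch of requesting such a flush from a client follows; the class name and connection setup are invented, and the log itself shows a server-initiated flush, not this client call.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MetaFlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Flush every column family of hbase:meta; the region server snapshots the
      // memstores, writes new HFiles under the region's .tmp directory, and then
      // commits them into the store, mirroring the DefaultStoreFlusher entries above.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}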
2024-11-11T21:48:03,064 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-11T21:48:03,064 INFO [StoreOpener-eab9b197412b79d6a3d1daed5218ca45-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eab9b197412b79d6a3d1daed5218ca45 columnFamilyName info 2024-11-11T21:48:03,064 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=14, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure a7500af498a2592feb5bc54d0ae1aea4, server=22a1f13b90b0,33459,1731361667092 because future has completed 2024-11-11T21:48:03,064 DEBUG [StoreOpener-eab9b197412b79d6a3d1daed5218ca45-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:48:03,069 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-11-11T21:48:03,069 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; OpenRegionProcedure a7500af498a2592feb5bc54d0ae1aea4, server=22a1f13b90b0,33459,1731361667092 in 206 msec 2024-11-11T21:48:03,072 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a7500af498a2592feb5bc54d0ae1aea4, ASSIGN in 367 msec 2024-11-11T21:48:03,075 DEBUG [StoreOpener-eab9b197412b79d6a3d1daed5218ca45-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/eab9b197412b79d6a3d1daed5218ca45/info/e2c37e3d2455400c8c5701f98d8429ba.793e534008933f55fa324116a0af01cf->hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/e2c37e3d2455400c8c5701f98d8429ba-bottom 2024-11-11T21:48:03,076 INFO [StoreOpener-eab9b197412b79d6a3d1daed5218ca45-1 {}] regionserver.HStore(327): Store=eab9b197412b79d6a3d1daed5218ca45/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:48:03,081 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for eab9b197412b79d6a3d1daed5218ca45 2024-11-11T21:48:03,082 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/eab9b197412b79d6a3d1daed5218ca45 2024-11-11T21:48:03,083 
DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/eab9b197412b79d6a3d1daed5218ca45 2024-11-11T21:48:03,083 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for eab9b197412b79d6a3d1daed5218ca45 2024-11-11T21:48:03,083 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for eab9b197412b79d6a3d1daed5218ca45 2024-11-11T21:48:03,084 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/.tmp/info/77a812f6abc042919dd0a0f27a496839 is 193, key is TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4./info:regioninfo/1731361683062/Put/seqid=0 2024-11-11T21:48:03,085 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for eab9b197412b79d6a3d1daed5218ca45 2024-11-11T21:48:03,085 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened eab9b197412b79d6a3d1daed5218ca45; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=849547, jitterRate=0.08025527000427246}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T21:48:03,086 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for eab9b197412b79d6a3d1daed5218ca45 2024-11-11T21:48:03,086 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for eab9b197412b79d6a3d1daed5218ca45: Running coprocessor pre-open hook at 1731361683062Writing region info on filesystem at 1731361683062Initializing all the Stores at 1731361683063 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361683063Cleaning up temporary data from old regions at 1731361683083 (+20 ms)Running coprocessor post-open hooks at 1731361683086 (+3 ms)Region opened successfully at 1731361683086 2024-11-11T21:48:03,087 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45., pid=13, masterSystemTime=1731361683012 2024-11-11T21:48:03,087 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store eab9b197412b79d6a3d1daed5218ca45:info, priority=-2147483648, current under compaction store size is 2 2024-11-11T21:48:03,087 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:03,087 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-11T21:48:03,088 INFO [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45. 2024-11-11T21:48:03,088 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.HStore(1541): eab9b197412b79d6a3d1daed5218ca45/info is initiating minor compaction (all files) 2024-11-11T21:48:03,088 INFO [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of eab9b197412b79d6a3d1daed5218ca45/info in TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45. 2024-11-11T21:48:03,088 INFO [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/eab9b197412b79d6a3d1daed5218ca45/info/e2c37e3d2455400c8c5701f98d8429ba.793e534008933f55fa324116a0af01cf->hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/e2c37e3d2455400c8c5701f98d8429ba-bottom] into tmpdir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/eab9b197412b79d6a3d1daed5218ca45/.tmp, totalSize=73.6 K 2024-11-11T21:48:03,089 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] compactions.Compactor(225): Compacting e2c37e3d2455400c8c5701f98d8429ba.793e534008933f55fa324116a0af01cf, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731361678130 2024-11-11T21:48:03,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741851_1027 (size=9882) 2024-11-11T21:48:03,089 DEBUG [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45. 2024-11-11T21:48:03,090 INFO [RS_OPEN_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45. 
2024-11-11T21:48:03,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741851_1027 (size=9882) 2024-11-11T21:48:03,091 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=eab9b197412b79d6a3d1daed5218ca45, regionState=OPEN, openSeqNum=127, regionLocation=22a1f13b90b0,33459,1731361667092 2024-11-11T21:48:03,091 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/.tmp/info/77a812f6abc042919dd0a0f27a496839 2024-11-11T21:48:03,094 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure eab9b197412b79d6a3d1daed5218ca45, server=22a1f13b90b0,33459,1731361667092 because future has completed 2024-11-11T21:48:03,094 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a7500af498a2592feb5bc54d0ae1aea4#info#compaction#65 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T21:48:03,095 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/c595a6749dd045d3a8b896ebb5a175f5 is 1080, key is row0062/info:/1731361680282/Put/seqid=0 2024-11-11T21:48:03,098 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-11T21:48:03,099 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure eab9b197412b79d6a3d1daed5218ca45, server=22a1f13b90b0,33459,1731361667092 in 236 msec 2024-11-11T21:48:03,101 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-11T21:48:03,101 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=eab9b197412b79d6a3d1daed5218ca45, ASSIGN in 397 msec 2024-11-11T21:48:03,103 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=793e534008933f55fa324116a0af01cf, daughterA=eab9b197412b79d6a3d1daed5218ca45, daughterB=a7500af498a2592feb5bc54d0ae1aea4 in 730 msec 2024-11-11T21:48:03,103 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=793e534008933f55fa324116a0af01cf, daughterA=23de6ba4b50527b70cbd16e029eefa7a, daughterB=f62bdc70658dd0fd41b37f2e5c6593ab 2024-11-11T21:48:03,103 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=793e534008933f55fa324116a0af01cf, daughterA=23de6ba4b50527b70cbd16e029eefa7a, 
daughterB=f62bdc70658dd0fd41b37f2e5c6593ab 2024-11-11T21:48:03,103 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=793e534008933f55fa324116a0af01cf, daughterA=23de6ba4b50527b70cbd16e029eefa7a, daughterB=f62bdc70658dd0fd41b37f2e5c6593ab 2024-11-11T21:48:03,104 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(534): Split of {ENCODED => 793e534008933f55fa324116a0af01cf, NAME => 'TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf.', STARTKEY => '', ENDKEY => ''} skipped; state is already SPLIT 2024-11-11T21:48:03,106 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=793e534008933f55fa324116a0af01cf, daughterA=23de6ba4b50527b70cbd16e029eefa7a, daughterB=f62bdc70658dd0fd41b37f2e5c6593ab in 725 msec 2024-11-11T21:48:03,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741852_1028 (size=43081) 2024-11-11T21:48:03,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741852_1028 (size=43081) 2024-11-11T21:48:03,112 INFO [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eab9b197412b79d6a3d1daed5218ca45#info#compaction#66 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T21:48:03,112 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/eab9b197412b79d6a3d1daed5218ca45/.tmp/info/f95627d5184942968caa8cd90a53c1c4 is 1080, key is row0001/info:/1731361678130/Put/seqid=0 2024-11-11T21:48:03,118 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/c595a6749dd045d3a8b896ebb5a175f5 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/c595a6749dd045d3a8b896ebb5a175f5 2024-11-11T21:48:03,118 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/.tmp/ns/9cc07a895e5240718e1dddb41ceeba31 is 43, key is default/ns:d/1731361667991/Put/seqid=0 2024-11-11T21:48:03,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741853_1029 (size=70862) 2024-11-11T21:48:03,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741853_1029 (size=70862) 2024-11-11T21:48:03,125 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in a7500af498a2592feb5bc54d0ae1aea4/info of a7500af498a2592feb5bc54d0ae1aea4 into 
c595a6749dd045d3a8b896ebb5a175f5(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T21:48:03,125 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a7500af498a2592feb5bc54d0ae1aea4: 2024-11-11T21:48:03,125 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4., storeName=a7500af498a2592feb5bc54d0ae1aea4/info, priority=12, startTime=1731361683059; duration=0sec 2024-11-11T21:48:03,125 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:03,125 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a7500af498a2592feb5bc54d0ae1aea4:info 2024-11-11T21:48:03,126 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/eab9b197412b79d6a3d1daed5218ca45/.tmp/info/f95627d5184942968caa8cd90a53c1c4 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/eab9b197412b79d6a3d1daed5218ca45/info/f95627d5184942968caa8cd90a53c1c4 2024-11-11T21:48:03,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741854_1030 (size=5153) 2024-11-11T21:48:03,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741854_1030 (size=5153) 2024-11-11T21:48:03,127 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/.tmp/ns/9cc07a895e5240718e1dddb41ceeba31 2024-11-11T21:48:03,132 INFO [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in eab9b197412b79d6a3d1daed5218ca45/info of eab9b197412b79d6a3d1daed5218ca45 into f95627d5184942968caa8cd90a53c1c4(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T21:48:03,132 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for eab9b197412b79d6a3d1daed5218ca45: 2024-11-11T21:48:03,132 INFO [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45., storeName=eab9b197412b79d6a3d1daed5218ca45/info, priority=15, startTime=1731361683087; duration=0sec 2024-11-11T21:48:03,132 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:03,132 DEBUG [RS:0;22a1f13b90b0:33459-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eab9b197412b79d6a3d1daed5218ca45:info 2024-11-11T21:48:03,144 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/.tmp/table/45009203012544b6ad42af8f66ea60c4 is 65, key is TestLogRolling-testLogRolling/table:state/1731361668404/Put/seqid=0 2024-11-11T21:48:03,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741855_1031 (size=5340) 2024-11-11T21:48:03,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741855_1031 (size=5340) 2024-11-11T21:48:03,551 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/.tmp/table/45009203012544b6ad42af8f66ea60c4 2024-11-11T21:48:03,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/.tmp/info/77a812f6abc042919dd0a0f27a496839 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/info/77a812f6abc042919dd0a0f27a496839 2024-11-11T21:48:03,570 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/info/77a812f6abc042919dd0a0f27a496839, entries=30, sequenceid=17, filesize=9.7 K 2024-11-11T21:48:03,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/.tmp/ns/9cc07a895e5240718e1dddb41ceeba31 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/ns/9cc07a895e5240718e1dddb41ceeba31 2024-11-11T21:48:03,576 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/ns/9cc07a895e5240718e1dddb41ceeba31, entries=2, sequenceid=17, filesize=5.0 K 2024-11-11T21:48:03,578 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/.tmp/table/45009203012544b6ad42af8f66ea60c4 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/table/45009203012544b6ad42af8f66ea60c4 2024-11-11T21:48:03,583 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/table/45009203012544b6ad42af8f66ea60c4, entries=2, sequenceid=17, filesize=5.2 K 2024-11-11T21:48:03,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 520ms, sequenceid=17, compaction requested=false 2024-11-11T21:48:03,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-11T21:48:03,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:48:03,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:03,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:04,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:48966 deadline: 1731361694371, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf. is not online on 22a1f13b90b0,33459,1731361667092 2024-11-11T21:48:04,399 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf., hostname=22a1f13b90b0,33459,1731361667092, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf., hostname=22a1f13b90b0,33459,1731361667092, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf. is not online on 22a1f13b90b0,33459,1731361667092 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-11T21:48:04,399 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf., hostname=22a1f13b90b0,33459,1731361667092, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf. 
is not online on 22a1f13b90b0,33459,1731361667092 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-11T21:48:04,400 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731361668031.793e534008933f55fa324116a0af01cf., hostname=22a1f13b90b0,33459,1731361667092, seqNum=2 from cache 2024-11-11T21:48:04,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:48:04,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:04,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:05,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:48:05,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:05,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:06,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:48:06,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:06,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:07,587 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:07,587 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:07,587 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:07,587 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:07,587 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:07,588 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:07,588 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:07,588 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:07,610 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:07,611 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:07,611 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:07,611 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:07,611 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:07,611 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:07,615 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:07,615 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:07,615 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:07,618 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:07,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:07,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:07,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:08,125 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T21:48:08,126 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:08,126 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:08,127 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:08,127 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:08,127 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:08,128 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:08,129 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:08,129 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:08,171 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:08,171 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:08,171 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:08,172 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:08,172 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:08,172 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:08,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:08,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:08,178 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:08,182 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T21:48:08,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-11T21:48:14,435 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4., hostname=22a1f13b90b0,33459,1731361667092, seqNum=127]
2024-11-11T21:48:14,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on a7500af498a2592feb5bc54d0ae1aea4
2024-11-11T21:48:14,447 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a7500af498a2592feb5bc54d0ae1aea4 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-11T21:48:14,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/f10d78e88e854289ac2903a0b7e831a4 is 1080, key is row0097/info:/1731361694436/Put/seqid=0
2024-11-11T21:48:14,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741856_1032 (size=12516)
2024-11-11T21:48:14,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741856_1032 (size=12516)
2024-11-11T21:48:14,458 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/f10d78e88e854289ac2903a0b7e831a4
2024-11-11T21:48:14,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/f10d78e88e854289ac2903a0b7e831a4 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/f10d78e88e854289ac2903a0b7e831a4
2024-11-11T21:48:14,469 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/f10d78e88e854289ac2903a0b7e831a4, entries=7, sequenceid=137, filesize=12.2 K
2024-11-11T21:48:14,470 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for a7500af498a2592feb5bc54d0ae1aea4 in 23ms, sequenceid=137, compaction requested=false
2024-11-11T21:48:14,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a7500af498a2592feb5bc54d0ae1aea4:
2024-11-11T21:48:14,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on a7500af498a2592feb5bc54d0ae1aea4
2024-11-11T21:48:14,471 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a7500af498a2592feb5bc54d0ae1aea4 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-11-11T21:48:14,475 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/cacb240b39234ab2818917e666334863 is 1080, key is row0104/info:/1731361694448/Put/seqid=0
2024-11-11T21:48:14,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741857_1033 (size=17906)
2024-11-11T21:48:14,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741857_1033 (size=17906)
2024-11-11T21:48:14,480 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/cacb240b39234ab2818917e666334863
2024-11-11T21:48:14,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/cacb240b39234ab2818917e666334863 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/cacb240b39234ab2818917e666334863
2024-11-11T21:48:14,493 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/cacb240b39234ab2818917e666334863, entries=12, sequenceid=152, filesize=17.5 K
2024-11-11T21:48:14,494 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for a7500af498a2592feb5bc54d0ae1aea4 in 23ms, sequenceid=152, compaction requested=true
2024-11-11T21:48:14,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a7500af498a2592feb5bc54d0ae1aea4:
2024-11-11T21:48:14,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a7500af498a2592feb5bc54d0ae1aea4:info, priority=-2147483648, current under compaction store size is 1
2024-11-11T21:48:14,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-11T21:48:14,494 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-11T21:48:14,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on a7500af498a2592feb5bc54d0ae1aea4
2024-11-11T21:48:14,494 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a7500af498a2592feb5bc54d0ae1aea4 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB
2024-11-11T21:48:14,496 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 73503 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-11T21:48:14,496 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1541): a7500af498a2592feb5bc54d0ae1aea4/info is initiating minor compaction (all files)
2024-11-11T21:48:14,496 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a7500af498a2592feb5bc54d0ae1aea4/info in TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.
2024-11-11T21:48:14,497 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/c595a6749dd045d3a8b896ebb5a175f5, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/f10d78e88e854289ac2903a0b7e831a4, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/cacb240b39234ab2818917e666334863] into tmpdir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp, totalSize=71.8 K
2024-11-11T21:48:14,497 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting c595a6749dd045d3a8b896ebb5a175f5, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731361680282
2024-11-11T21:48:14,497 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting f10d78e88e854289ac2903a0b7e831a4, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731361694436
2024-11-11T21:48:14,498 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting cacb240b39234ab2818917e666334863, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1731361694448
2024-11-11T21:48:14,499 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/fb85b9c9dc4b417b960cc079bb8a40f4 is 1080, key is row0116/info:/1731361694472/Put/seqid=0
2024-11-11T21:48:14,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741858_1034 (size=16828)
2024-11-11T21:48:14,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741858_1034 (size=16828)
2024-11-11T21:48:14,504 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/fb85b9c9dc4b417b960cc079bb8a40f4
2024-11-11T21:48:14,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/fb85b9c9dc4b417b960cc079bb8a40f4 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/fb85b9c9dc4b417b960cc079bb8a40f4
2024-11-11T21:48:14,510 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a7500af498a2592feb5bc54d0ae1aea4#info#compaction#72 average throughput is 27.71 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-11T21:48:14,510 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/a4190a67e9aa4d3c92c65ee90ac7c49e is 1080, key is row0062/info:/1731361680282/Put/seqid=0
2024-11-11T21:48:14,515 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/fb85b9c9dc4b417b960cc079bb8a40f4, entries=11, sequenceid=166, filesize=16.4 K
2024-11-11T21:48:14,516 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=2.10 KB/2152 for a7500af498a2592feb5bc54d0ae1aea4 in 22ms, sequenceid=166, compaction requested=false
2024-11-11T21:48:14,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a7500af498a2592feb5bc54d0ae1aea4:
2024-11-11T21:48:14,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741859_1035 (size=63733)
2024-11-11T21:48:14,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741859_1035 (size=63733)
2024-11-11T21:48:14,528 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/a4190a67e9aa4d3c92c65ee90ac7c49e as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/a4190a67e9aa4d3c92c65ee90ac7c49e
2024-11-11T21:48:14,534 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a7500af498a2592feb5bc54d0ae1aea4/info of a7500af498a2592feb5bc54d0ae1aea4 into a4190a67e9aa4d3c92c65ee90ac7c49e(size=62.2 K), total size for store is 78.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-11T21:48:14,534 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a7500af498a2592feb5bc54d0ae1aea4:
2024-11-11T21:48:14,534 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4., storeName=a7500af498a2592feb5bc54d0ae1aea4/info, priority=13, startTime=1731361694494; duration=0sec
2024-11-11T21:48:14,534 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-11T21:48:14,534 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a7500af498a2592feb5bc54d0ae1aea4:info
2024-11-11T21:48:14,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-11T21:48:14,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:14,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:15,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:48:15,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:15,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:16,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:16,515 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a7500af498a2592feb5bc54d0ae1aea4 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T21:48:16,520 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/ba1e9794486946b5a738288641c6d00d is 1080, key is row0127/info:/1731361694496/Put/seqid=0 2024-11-11T21:48:16,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741860_1036 (size=12516) 2024-11-11T21:48:16,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741860_1036 (size=12516) 2024-11-11T21:48:16,528 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/ba1e9794486946b5a738288641c6d00d 2024-11-11T21:48:16,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/ba1e9794486946b5a738288641c6d00d as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/ba1e9794486946b5a738288641c6d00d 2024-11-11T21:48:16,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/ba1e9794486946b5a738288641c6d00d, entries=7, sequenceid=177, filesize=12.2 K 2024-11-11T21:48:16,540 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 
KB/11836 for a7500af498a2592feb5bc54d0ae1aea4 in 25ms, sequenceid=177, compaction requested=true 2024-11-11T21:48:16,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a7500af498a2592feb5bc54d0ae1aea4: 2024-11-11T21:48:16,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a7500af498a2592feb5bc54d0ae1aea4:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T21:48:16,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:16,541 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T21:48:16,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:16,541 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a7500af498a2592feb5bc54d0ae1aea4 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-11T21:48:16,542 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93077 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T21:48:16,542 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1541): a7500af498a2592feb5bc54d0ae1aea4/info is initiating minor compaction (all files) 2024-11-11T21:48:16,542 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a7500af498a2592feb5bc54d0ae1aea4/info in TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4. 
2024-11-11T21:48:16,542 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/a4190a67e9aa4d3c92c65ee90ac7c49e, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/fb85b9c9dc4b417b960cc079bb8a40f4, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/ba1e9794486946b5a738288641c6d00d] into tmpdir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp, totalSize=90.9 K 2024-11-11T21:48:16,543 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting a4190a67e9aa4d3c92c65ee90ac7c49e, keycount=54, bloomtype=ROW, size=62.2 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1731361680282 2024-11-11T21:48:16,543 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting fb85b9c9dc4b417b960cc079bb8a40f4, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1731361694472 2024-11-11T21:48:16,543 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting ba1e9794486946b5a738288641c6d00d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1731361694496 2024-11-11T21:48:16,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/7c08ac76067246fc8756d920d936a176 is 1080, key is row0134/info:/1731361696516/Put/seqid=0 2024-11-11T21:48:16,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741861_1037 (size=17906) 2024-11-11T21:48:16,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741861_1037 (size=17906) 2024-11-11T21:48:16,553 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/7c08ac76067246fc8756d920d936a176 2024-11-11T21:48:16,557 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a7500af498a2592feb5bc54d0ae1aea4#info#compaction#75 average throughput is 36.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T21:48:16,558 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/7ab2f587cff4400ab8b78a0be7723065 is 1080, key is row0062/info:/1731361680282/Put/seqid=0 2024-11-11T21:48:16,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/7c08ac76067246fc8756d920d936a176 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7c08ac76067246fc8756d920d936a176 2024-11-11T21:48:16,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741862_1038 (size=83312) 2024-11-11T21:48:16,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741862_1038 (size=83312) 2024-11-11T21:48:16,566 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7c08ac76067246fc8756d920d936a176, entries=12, sequenceid=192, filesize=17.5 K 2024-11-11T21:48:16,567 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for a7500af498a2592feb5bc54d0ae1aea4 in 26ms, sequenceid=192, compaction requested=false 2024-11-11T21:48:16,567 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a7500af498a2592feb5bc54d0ae1aea4: 2024-11-11T21:48:16,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:16,567 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a7500af498a2592feb5bc54d0ae1aea4 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-11T21:48:16,568 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/7ab2f587cff4400ab8b78a0be7723065 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7ab2f587cff4400ab8b78a0be7723065 2024-11-11T21:48:16,571 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/7e48a20978bb43729d8133032664c210 is 1080, key is row0146/info:/1731361696542/Put/seqid=0 2024-11-11T21:48:16,575 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a7500af498a2592feb5bc54d0ae1aea4/info of a7500af498a2592feb5bc54d0ae1aea4 into 
7ab2f587cff4400ab8b78a0be7723065(size=81.4 K), total size for store is 98.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T21:48:16,575 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a7500af498a2592feb5bc54d0ae1aea4: 2024-11-11T21:48:16,575 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4., storeName=a7500af498a2592feb5bc54d0ae1aea4/info, priority=13, startTime=1731361696541; duration=0sec 2024-11-11T21:48:16,575 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:16,575 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a7500af498a2592feb5bc54d0ae1aea4:info 2024-11-11T21:48:16,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741863_1039 (size=16828) 2024-11-11T21:48:16,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741863_1039 (size=16828) 2024-11-11T21:48:16,578 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/7e48a20978bb43729d8133032664c210 2024-11-11T21:48:16,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/7e48a20978bb43729d8133032664c210 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7e48a20978bb43729d8133032664c210 2024-11-11T21:48:16,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7e48a20978bb43729d8133032664c210, entries=11, sequenceid=206, filesize=16.4 K 2024-11-11T21:48:16,588 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=4.20 KB/4304 for a7500af498a2592feb5bc54d0ae1aea4 in 21ms, sequenceid=206, compaction requested=true 2024-11-11T21:48:16,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a7500af498a2592feb5bc54d0ae1aea4: 2024-11-11T21:48:16,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a7500af498a2592feb5bc54d0ae1aea4:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T21:48:16,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:16,589 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T21:48:16,590 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 118046 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T21:48:16,590 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1541): a7500af498a2592feb5bc54d0ae1aea4/info is initiating minor compaction (all files) 2024-11-11T21:48:16,590 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a7500af498a2592feb5bc54d0ae1aea4/info in TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4. 2024-11-11T21:48:16,590 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7ab2f587cff4400ab8b78a0be7723065, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7c08ac76067246fc8756d920d936a176, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7e48a20978bb43729d8133032664c210] into tmpdir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp, totalSize=115.3 K 2024-11-11T21:48:16,590 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7ab2f587cff4400ab8b78a0be7723065, keycount=72, bloomtype=ROW, size=81.4 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1731361680282 2024-11-11T21:48:16,590 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7c08ac76067246fc8756d920d936a176, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1731361696516 2024-11-11T21:48:16,591 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7e48a20978bb43729d8133032664c210, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1731361696542 2024-11-11T21:48:16,601 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a7500af498a2592feb5bc54d0ae1aea4#info#compaction#77 average throughput is 48.74 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T21:48:16,601 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/0d4e6284d7704b2da4300e1c514bed3c is 1080, key is row0062/info:/1731361680282/Put/seqid=0 2024-11-11T21:48:16,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741864_1040 (size=108216) 2024-11-11T21:48:16,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741864_1040 (size=108216) 2024-11-11T21:48:16,610 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/0d4e6284d7704b2da4300e1c514bed3c as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/0d4e6284d7704b2da4300e1c514bed3c 2024-11-11T21:48:16,616 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a7500af498a2592feb5bc54d0ae1aea4/info of a7500af498a2592feb5bc54d0ae1aea4 into 0d4e6284d7704b2da4300e1c514bed3c(size=105.7 K), total size for store is 105.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T21:48:16,616 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a7500af498a2592feb5bc54d0ae1aea4: 2024-11-11T21:48:16,616 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4., storeName=a7500af498a2592feb5bc54d0ae1aea4/info, priority=13, startTime=1731361696589; duration=0sec 2024-11-11T21:48:16,616 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:16,616 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a7500af498a2592feb5bc54d0ae1aea4:info 2024-11-11T21:48:16,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:16,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:16,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:16,996 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T21:48:17,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:17,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:17,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:48:18,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:18,581 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a7500af498a2592feb5bc54d0ae1aea4 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T21:48:18,585 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/7fdf7db60ece404194e1bf35acf29a46 is 1080, key is row0157/info:/1731361696569/Put/seqid=0 2024-11-11T21:48:18,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741865_1041 (size=12516) 2024-11-11T21:48:18,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741865_1041 (size=12516) 2024-11-11T21:48:18,593 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=218 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/7fdf7db60ece404194e1bf35acf29a46 2024-11-11T21:48:18,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/7fdf7db60ece404194e1bf35acf29a46 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7fdf7db60ece404194e1bf35acf29a46 2024-11-11T21:48:18,606 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7fdf7db60ece404194e1bf35acf29a46, entries=7, sequenceid=218, filesize=12.2 K 2024-11-11T21:48:18,607 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for a7500af498a2592feb5bc54d0ae1aea4 in 25ms, sequenceid=218, compaction requested=false 2024-11-11T21:48:18,607 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a7500af498a2592feb5bc54d0ae1aea4: 2024-11-11T21:48:18,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:18,608 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a7500af498a2592feb5bc54d0ae1aea4 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-11T21:48:18,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/602d4bb0fd65487ca50f4172d32341a2 is 1080, key is row0164/info:/1731361698582/Put/seqid=0 2024-11-11T21:48:18,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to 
blk_1073741866_1042 (size=20078) 2024-11-11T21:48:18,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741866_1042 (size=20078) 2024-11-11T21:48:18,619 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/602d4bb0fd65487ca50f4172d32341a2 2024-11-11T21:48:18,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/602d4bb0fd65487ca50f4172d32341a2 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/602d4bb0fd65487ca50f4172d32341a2 2024-11-11T21:48:18,631 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/602d4bb0fd65487ca50f4172d32341a2, entries=14, sequenceid=235, filesize=19.6 K 2024-11-11T21:48:18,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for a7500af498a2592feb5bc54d0ae1aea4 in 24ms, sequenceid=235, compaction requested=true 2024-11-11T21:48:18,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a7500af498a2592feb5bc54d0ae1aea4: 2024-11-11T21:48:18,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a7500af498a2592feb5bc54d0ae1aea4:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T21:48:18,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:18,632 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T21:48:18,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:18,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a7500af498a2592feb5bc54d0ae1aea4 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-11T21:48:18,633 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 140810 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T21:48:18,633 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1541): a7500af498a2592feb5bc54d0ae1aea4/info is initiating minor compaction (all files) 2024-11-11T21:48:18,633 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a7500af498a2592feb5bc54d0ae1aea4/info in TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4. 
2024-11-11T21:48:18,634 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/0d4e6284d7704b2da4300e1c514bed3c, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7fdf7db60ece404194e1bf35acf29a46, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/602d4bb0fd65487ca50f4172d32341a2] into tmpdir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp, totalSize=137.5 K 2024-11-11T21:48:18,634 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0d4e6284d7704b2da4300e1c514bed3c, keycount=95, bloomtype=ROW, size=105.7 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1731361680282 2024-11-11T21:48:18,634 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7fdf7db60ece404194e1bf35acf29a46, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1731361696569 2024-11-11T21:48:18,635 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 602d4bb0fd65487ca50f4172d32341a2, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1731361698582 2024-11-11T21:48:18,636 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/d3cbf7ea4b7b4efca17d801db40632de is 1080, key is row0178/info:/1731361698609/Put/seqid=0 2024-11-11T21:48:18,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741867_1043 (size=17906) 2024-11-11T21:48:18,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741867_1043 (size=17906) 2024-11-11T21:48:18,642 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/d3cbf7ea4b7b4efca17d801db40632de 2024-11-11T21:48:18,647 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a7500af498a2592feb5bc54d0ae1aea4#info#compaction#81 average throughput is 39.68 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T21:48:18,648 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/049755e4c6d64f59a725772fbf5dafa8 is 1080, key is row0062/info:/1731361680282/Put/seqid=0 2024-11-11T21:48:18,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/d3cbf7ea4b7b4efca17d801db40632de as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/d3cbf7ea4b7b4efca17d801db40632de 2024-11-11T21:48:18,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741868_1044 (size=131088) 2024-11-11T21:48:18,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741868_1044 (size=131088) 2024-11-11T21:48:18,653 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/d3cbf7ea4b7b4efca17d801db40632de, entries=12, sequenceid=250, filesize=17.5 K 2024-11-11T21:48:18,654 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=3.15 KB/3228 for a7500af498a2592feb5bc54d0ae1aea4 in 22ms, sequenceid=250, compaction requested=false 2024-11-11T21:48:18,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a7500af498a2592feb5bc54d0ae1aea4: 2024-11-11T21:48:18,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:18,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:18,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:19,059 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/049755e4c6d64f59a725772fbf5dafa8 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/049755e4c6d64f59a725772fbf5dafa8 2024-11-11T21:48:19,067 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a7500af498a2592feb5bc54d0ae1aea4/info of a7500af498a2592feb5bc54d0ae1aea4 into 049755e4c6d64f59a725772fbf5dafa8(size=128.0 K), total size for store is 145.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T21:48:19,067 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a7500af498a2592feb5bc54d0ae1aea4: 2024-11-11T21:48:19,067 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4., storeName=a7500af498a2592feb5bc54d0ae1aea4/info, priority=13, startTime=1731361698632; duration=0sec 2024-11-11T21:48:19,067 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:19,067 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a7500af498a2592feb5bc54d0ae1aea4:info 2024-11-11T21:48:19,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:48:19,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:19,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:20,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:20,654 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a7500af498a2592feb5bc54d0ae1aea4 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T21:48:20,659 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/bf050ef918854df185a1ff49b589444d is 1080, key is row0190/info:/1731361698633/Put/seqid=0 2024-11-11T21:48:20,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741869_1045 (size=12520) 2024-11-11T21:48:20,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741869_1045 (size=12520) 2024-11-11T21:48:20,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/bf050ef918854df185a1ff49b589444d 2024-11-11T21:48:20,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/bf050ef918854df185a1ff49b589444d as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/bf050ef918854df185a1ff49b589444d 2024-11-11T21:48:20,679 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/bf050ef918854df185a1ff49b589444d, entries=7, sequenceid=261, filesize=12.2 K 2024-11-11T21:48:20,680 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 
KB/11836 for a7500af498a2592feb5bc54d0ae1aea4 in 26ms, sequenceid=261, compaction requested=true 2024-11-11T21:48:20,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a7500af498a2592feb5bc54d0ae1aea4: 2024-11-11T21:48:20,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a7500af498a2592feb5bc54d0ae1aea4:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T21:48:20,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:20,680 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T21:48:20,681 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 161514 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T21:48:20,681 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1541): a7500af498a2592feb5bc54d0ae1aea4/info is initiating minor compaction (all files) 2024-11-11T21:48:20,681 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a7500af498a2592feb5bc54d0ae1aea4/info in TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4. 2024-11-11T21:48:20,682 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/049755e4c6d64f59a725772fbf5dafa8, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/d3cbf7ea4b7b4efca17d801db40632de, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/bf050ef918854df185a1ff49b589444d] into tmpdir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp, totalSize=157.7 K 2024-11-11T21:48:20,682 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 049755e4c6d64f59a725772fbf5dafa8, keycount=116, bloomtype=ROW, size=128.0 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1731361680282 2024-11-11T21:48:20,682 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting d3cbf7ea4b7b4efca17d801db40632de, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1731361698609 2024-11-11T21:48:20,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:20,682 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a7500af498a2592feb5bc54d0ae1aea4 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-11T21:48:20,683 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting bf050ef918854df185a1ff49b589444d, keycount=7, bloomtype=ROW, 
size=12.2 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1731361698633 2024-11-11T21:48:20,686 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/3b68fca3f7e047409bb8cdb250e02895 is 1080, key is row0197/info:/1731361700655/Put/seqid=0 2024-11-11T21:48:20,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741870_1046 (size=19013) 2024-11-11T21:48:20,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741870_1046 (size=19013) 2024-11-11T21:48:20,695 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/3b68fca3f7e047409bb8cdb250e02895 2024-11-11T21:48:20,697 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a7500af498a2592feb5bc54d0ae1aea4#info#compaction#84 average throughput is 46.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T21:48:20,699 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/94ab2479913543028efa6daa13444164 is 1080, key is row0062/info:/1731361680282/Put/seqid=0 2024-11-11T21:48:20,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741871_1047 (size=151749) 2024-11-11T21:48:20,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/3b68fca3f7e047409bb8cdb250e02895 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/3b68fca3f7e047409bb8cdb250e02895 2024-11-11T21:48:20,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741871_1047 (size=151749) 2024-11-11T21:48:20,709 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/3b68fca3f7e047409bb8cdb250e02895, entries=13, sequenceid=277, filesize=18.6 K 2024-11-11T21:48:20,710 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=11.56 KB/11836 for a7500af498a2592feb5bc54d0ae1aea4 in 28ms, sequenceid=277, compaction requested=false 2024-11-11T21:48:20,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a7500af498a2592feb5bc54d0ae1aea4: 2024-11-11T21:48:20,711 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:20,711 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a7500af498a2592feb5bc54d0ae1aea4 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-11T21:48:20,712 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/94ab2479913543028efa6daa13444164 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/94ab2479913543028efa6daa13444164 2024-11-11T21:48:20,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/833703f8f95d43c48c5c3b15f0692a05 is 1080, key is row0210/info:/1731361700684/Put/seqid=0 2024-11-11T21:48:20,719 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a7500af498a2592feb5bc54d0ae1aea4/info of a7500af498a2592feb5bc54d0ae1aea4 into 94ab2479913543028efa6daa13444164(size=148.2 K), total size for store is 166.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T21:48:20,719 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a7500af498a2592feb5bc54d0ae1aea4: 2024-11-11T21:48:20,719 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4., storeName=a7500af498a2592feb5bc54d0ae1aea4/info, priority=13, startTime=1731361700680; duration=0sec 2024-11-11T21:48:20,719 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:20,719 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a7500af498a2592feb5bc54d0ae1aea4:info 2024-11-11T21:48:20,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741872_1048 (size=17918) 2024-11-11T21:48:20,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741872_1048 (size=17918) 2024-11-11T21:48:20,721 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/833703f8f95d43c48c5c3b15f0692a05 2024-11-11T21:48:20,726 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/833703f8f95d43c48c5c3b15f0692a05 as 
hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/833703f8f95d43c48c5c3b15f0692a05 2024-11-11T21:48:20,731 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/833703f8f95d43c48c5c3b15f0692a05, entries=12, sequenceid=292, filesize=17.5 K 2024-11-11T21:48:20,732 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=3.15 KB/3228 for a7500af498a2592feb5bc54d0ae1aea4 in 21ms, sequenceid=292, compaction requested=true 2024-11-11T21:48:20,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a7500af498a2592feb5bc54d0ae1aea4: 2024-11-11T21:48:20,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a7500af498a2592feb5bc54d0ae1aea4:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T21:48:20,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:20,732 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T21:48:20,733 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 188680 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T21:48:20,733 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1541): a7500af498a2592feb5bc54d0ae1aea4/info is initiating minor compaction (all files) 2024-11-11T21:48:20,733 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a7500af498a2592feb5bc54d0ae1aea4/info in TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4. 
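For reference, a simplified sketch of the threshold-style selection behind the "Selecting compaction from 3 store files, 0 compacting, 3 eligible" and "selected 3 files of size 188680" entries above: once a store accumulates at least the compaction threshold of eligible files, all of them become one minor-compaction candidate whose byte sizes sum to the reported total. This is an illustration only, not HBase's ExploringCompactionPolicy; the file names and byte sizes are copied from the surrounding log, and the threshold of 3 mirrors the default hbase.hstore.compactionThreshold.

// Simplified selection check; illustration only.
import java.util.List;

public class CompactionSelectionSketch {
    record StoreFile(String name, long sizeBytes) {}

    // Once a store has at least `threshold` eligible files, treat them all as
    // one minor-compaction candidate; otherwise select nothing.
    static List<StoreFile> select(List<StoreFile> eligible, int threshold) {
        return eligible.size() >= threshold ? eligible : List.of();
    }

    public static void main(String[] args) {
        List<StoreFile> eligible = List.of(
            new StoreFile("94ab2479913543028efa6daa13444164", 151749),
            new StoreFile("3b68fca3f7e047409bb8cdb250e02895", 19013),
            new StoreFile("833703f8f95d43c48c5c3b15f0692a05", 17918));
        List<StoreFile> picked = select(eligible, 3);
        long total = picked.stream().mapToLong(StoreFile::sizeBytes).sum();
        System.out.println("selected " + picked.size() + " files of size " + total); // 188680
    }
}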
2024-11-11T21:48:20,733 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/94ab2479913543028efa6daa13444164, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/3b68fca3f7e047409bb8cdb250e02895, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/833703f8f95d43c48c5c3b15f0692a05] into tmpdir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp, totalSize=184.3 K 2024-11-11T21:48:20,734 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 94ab2479913543028efa6daa13444164, keycount=135, bloomtype=ROW, size=148.2 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1731361680282 2024-11-11T21:48:20,734 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3b68fca3f7e047409bb8cdb250e02895, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1731361700655 2024-11-11T21:48:20,735 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 833703f8f95d43c48c5c3b15f0692a05, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1731361700684 2024-11-11T21:48:20,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:20,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:20,746 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a7500af498a2592feb5bc54d0ae1aea4#info#compaction#86 average throughput is 54.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T21:48:20,747 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/c3204dc86dce4d88a73416b36e4be58a is 1080, key is row0062/info:/1731361680282/Put/seqid=0 2024-11-11T21:48:20,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741873_1049 (size=178834) 2024-11-11T21:48:20,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741873_1049 (size=178834) 2024-11-11T21:48:20,756 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/c3204dc86dce4d88a73416b36e4be58a as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/c3204dc86dce4d88a73416b36e4be58a 2024-11-11T21:48:20,762 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a7500af498a2592feb5bc54d0ae1aea4/info of a7500af498a2592feb5bc54d0ae1aea4 into c3204dc86dce4d88a73416b36e4be58a(size=174.6 K), total size for store is 174.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T21:48:20,762 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a7500af498a2592feb5bc54d0ae1aea4: 2024-11-11T21:48:20,762 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4., storeName=a7500af498a2592feb5bc54d0ae1aea4/info, priority=13, startTime=1731361700732; duration=0sec 2024-11-11T21:48:20,762 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:20,762 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a7500af498a2592feb5bc54d0ae1aea4:info 2024-11-11T21:48:20,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:21,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:21,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:21,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:22,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:22,727 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a7500af498a2592feb5bc54d0ae1aea4 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-11T21:48:22,731 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/189511dbe24e4f119df56a2537ecf264 is 1080, key is row0222/info:/1731361700713/Put/seqid=0 2024-11-11T21:48:22,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741874_1050 (size=12523) 2024-11-11T21:48:22,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:22,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741874_1050 (size=12523) 2024-11-11T21:48:22,738 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/189511dbe24e4f119df56a2537ecf264 2024-11-11T21:48:22,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:22,743 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/189511dbe24e4f119df56a2537ecf264 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/189511dbe24e4f119df56a2537ecf264 2024-11-11T21:48:22,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/189511dbe24e4f119df56a2537ecf264, entries=7, sequenceid=304, filesize=12.2 K 2024-11-11T21:48:22,749 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for a7500af498a2592feb5bc54d0ae1aea4 in 22ms, sequenceid=304, compaction requested=false 2024-11-11T21:48:22,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a7500af498a2592feb5bc54d0ae1aea4: 2024-11-11T21:48:22,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:22,750 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a7500af498a2592feb5bc54d0ae1aea4 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-11T21:48:22,754 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/1dfac7abb4e34bb48b311a8ec055d3f6 is 1080, key is row0229/info:/1731361702728/Put/seqid=0 2024-11-11T21:48:22,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741875_1051 (size=16839) 2024-11-11T21:48:22,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741875_1051 (size=16839) 2024-11-11T21:48:22,759 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at 
sequenceid=318 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/1dfac7abb4e34bb48b311a8ec055d3f6 2024-11-11T21:48:22,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/1dfac7abb4e34bb48b311a8ec055d3f6 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/1dfac7abb4e34bb48b311a8ec055d3f6 2024-11-11T21:48:22,772 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/1dfac7abb4e34bb48b311a8ec055d3f6, entries=11, sequenceid=318, filesize=16.4 K 2024-11-11T21:48:22,773 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=11.56 KB/11836 for a7500af498a2592feb5bc54d0ae1aea4 in 23ms, sequenceid=318, compaction requested=true 2024-11-11T21:48:22,773 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a7500af498a2592feb5bc54d0ae1aea4: 2024-11-11T21:48:22,773 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a7500af498a2592feb5bc54d0ae1aea4:info, priority=-2147483648, current under compaction store size is 1 2024-11-11T21:48:22,773 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:22,773 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T21:48:22,774 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 208196 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T21:48:22,774 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1541): a7500af498a2592feb5bc54d0ae1aea4/info is initiating minor compaction (all files) 2024-11-11T21:48:22,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33459 {}] regionserver.HRegion(8855): Flush requested on a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:22,775 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a7500af498a2592feb5bc54d0ae1aea4/info in TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4. 
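The repeated "Committing .../.tmp/info/<file> as .../info/<file>" entries reflect the usual write-to-temp-then-move pattern: the flushed or compacted HFile is written under the region's .tmp directory and then renamed into the store's info directory, at which point it becomes readable. Below is a minimal sketch of that commit step using the Hadoop FileSystem API; it is not the actual HRegionFileSystem implementation, and the paths are placeholders.

// Sketch of the .tmp-then-rename commit; placeholder paths, illustration only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path tmp = new Path("/data/default/ExampleTable/exampleRegion/.tmp/info/exampleHFile");
        Path dst = new Path("/data/default/ExampleTable/exampleRegion/info/exampleHFile");
        // On HDFS, rename is a single namenode metadata operation, so the flushed
        // or compacted file becomes visible under info/ in one step.
        if (!fs.rename(tmp, dst)) {
            throw new java.io.IOException("commit failed: " + tmp + " -> " + dst);
        }
    }
}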
2024-11-11T21:48:22,775 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a7500af498a2592feb5bc54d0ae1aea4 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-11T21:48:22,775 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/c3204dc86dce4d88a73416b36e4be58a, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/189511dbe24e4f119df56a2537ecf264, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/1dfac7abb4e34bb48b311a8ec055d3f6] into tmpdir=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp, totalSize=203.3 K 2024-11-11T21:48:22,775 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting c3204dc86dce4d88a73416b36e4be58a, keycount=160, bloomtype=ROW, size=174.6 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1731361680282 2024-11-11T21:48:22,776 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 189511dbe24e4f119df56a2537ecf264, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1731361700713 2024-11-11T21:48:22,776 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1dfac7abb4e34bb48b311a8ec055d3f6, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1731361702728 2024-11-11T21:48:22,780 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/4f135249c15a4f9886c11fa96db1c43b is 1080, key is row0240/info:/1731361702751/Put/seqid=0 2024-11-11T21:48:22,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741876_1052 (size=17918) 2024-11-11T21:48:22,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741876_1052 (size=17918) 2024-11-11T21:48:22,791 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/4f135249c15a4f9886c11fa96db1c43b 2024-11-11T21:48:22,795 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a7500af498a2592feb5bc54d0ae1aea4#info#compaction#90 average throughput is 36.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T21:48:22,796 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/d069d7ae4ad44f879922bcc224f3fd38 is 1080, key is row0062/info:/1731361680282/Put/seqid=0 2024-11-11T21:48:22,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/4f135249c15a4f9886c11fa96db1c43b as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/4f135249c15a4f9886c11fa96db1c43b 2024-11-11T21:48:22,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741877_1053 (size=198362) 2024-11-11T21:48:22,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741877_1053 (size=198362) 2024-11-11T21:48:22,802 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/4f135249c15a4f9886c11fa96db1c43b, entries=12, sequenceid=333, filesize=17.5 K 2024-11-11T21:48:22,803 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=5.25 KB/5380 for a7500af498a2592feb5bc54d0ae1aea4 in 27ms, sequenceid=333, compaction requested=false 2024-11-11T21:48:22,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a7500af498a2592feb5bc54d0ae1aea4: 2024-11-11T21:48:22,805 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/d069d7ae4ad44f879922bcc224f3fd38 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/d069d7ae4ad44f879922bcc224f3fd38 2024-11-11T21:48:22,811 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a7500af498a2592feb5bc54d0ae1aea4/info of a7500af498a2592feb5bc54d0ae1aea4 into d069d7ae4ad44f879922bcc224f3fd38(size=193.7 K), total size for store is 211.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
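As a quick consistency check on the figures reported above: the three inputs to this compaction were 178834 + 12523 + 16839 = 208196 bytes (the "3 files of size 208196", roughly 203.3 K), rewritten into the single 198362-byte file d069d7ae4ad44f879922bcc224f3fd38 (about 193.7 K); the reported store total of 211.2 K is that new file plus the 17.5 K flush output 4f135249c15a4f9886c11fa96db1c43b committed just beforehand (193.7 K + 17.5 K ≈ 211.2 K).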
2024-11-11T21:48:22,811 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a7500af498a2592feb5bc54d0ae1aea4: 2024-11-11T21:48:22,811 INFO [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4., storeName=a7500af498a2592feb5bc54d0ae1aea4/info, priority=13, startTime=1731361702773; duration=0sec 2024-11-11T21:48:22,811 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T21:48:22,811 DEBUG [RS:0;22a1f13b90b0:33459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a7500af498a2592feb5bc54d0ae1aea4:info 2024-11-11T21:48:22,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:48:23,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:23,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:23,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-11T21:48:24,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:24,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:24,788 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-11T21:48:24,789 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C33459%2C1731361667092.1731361704789 2024-11-11T21:48:24,799 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:24,799 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:24,799 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:24,799 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:24,799 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:24,799 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/WALs/22a1f13b90b0,33459,1731361667092/22a1f13b90b0%2C33459%2C1731361667092.1731361667514 with entries=317, filesize=310.18 KB; new WAL /user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/WALs/22a1f13b90b0,33459,1731361667092/22a1f13b90b0%2C33459%2C1731361667092.1731361704789 2024-11-11T21:48:24,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741833_1009 (size=317633) 2024-11-11T21:48:24,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741833_1009 (size=317633) 2024-11-11T21:48:24,807 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33915:33915),(127.0.0.1/127.0.0.1:43015:43015)] 2024-11-11T21:48:24,810 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for eab9b197412b79d6a3d1daed5218ca45: 2024-11-11T21:48:24,810 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-11T21:48:24,814 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/.tmp/info/ec6463fd7cb24beda381a19a7de3b3fd is 186, key is TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45./info:regioninfo/1731361683091/Put/seqid=0 2024-11-11T21:48:24,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741879_1055 (size=6153) 2024-11-11T21:48:24,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741879_1055 (size=6153) 2024-11-11T21:48:24,819 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/.tmp/info/ec6463fd7cb24beda381a19a7de3b3fd 2024-11-11T21:48:24,825 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/.tmp/info/ec6463fd7cb24beda381a19a7de3b3fd as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/info/ec6463fd7cb24beda381a19a7de3b3fd 2024-11-11T21:48:24,830 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/info/ec6463fd7cb24beda381a19a7de3b3fd, entries=5, sequenceid=21, filesize=6.0 K 2024-11-11T21:48:24,831 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 21ms, sequenceid=21, compaction requested=false 2024-11-11T21:48:24,831 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-11T21:48:24,831 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing a7500af498a2592feb5bc54d0ae1aea4 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB 2024-11-11T21:48:24,835 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/fba1f786d7b54f54b6fe976673d6725d is 1080, key is row0252/info:/1731361702777/Put/seqid=0 2024-11-11T21:48:24,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741880_1056 (size=10357) 2024-11-11T21:48:24,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741880_1056 (size=10357) 2024-11-11T21:48:24,841 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/fba1f786d7b54f54b6fe976673d6725d 2024-11-11T21:48:24,846 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/.tmp/info/fba1f786d7b54f54b6fe976673d6725d as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/fba1f786d7b54f54b6fe976673d6725d 2024-11-11T21:48:24,851 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/fba1f786d7b54f54b6fe976673d6725d, entries=5, sequenceid=342, filesize=10.1 K 2024-11-11T21:48:24,852 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for a7500af498a2592feb5bc54d0ae1aea4 in 21ms, sequenceid=342, compaction requested=true 2024-11-11T21:48:24,852 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for a7500af498a2592feb5bc54d0ae1aea4: 2024-11-11T21:48:24,852 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C33459%2C1731361667092.1731361704852 2024-11-11T21:48:24,858 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:24,858 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:24,858 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:24,858 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:24,858 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:24,858 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/WALs/22a1f13b90b0,33459,1731361667092/22a1f13b90b0%2C33459%2C1731361667092.1731361704789 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/WALs/22a1f13b90b0,33459,1731361667092/22a1f13b90b0%2C33459%2C1731361667092.1731361704852 2024-11-11T21:48:24,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741878_1054 (size=731) 2024-11-11T21:48:24,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741878_1054 (size=731) 2024-11-11T21:48:24,861 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/WALs/22a1f13b90b0,33459,1731361667092/22a1f13b90b0%2C33459%2C1731361667092.1731361667514 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/oldWALs/22a1f13b90b0%2C33459%2C1731361667092.1731361667514 2024-11-11T21:48:24,862 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33915:33915),(127.0.0.1/127.0.0.1:43015:43015)] 2024-11-11T21:48:24,862 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T21:48:24,862 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/WALs/22a1f13b90b0,33459,1731361667092/22a1f13b90b0%2C33459%2C1731361667092.1731361704789 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/oldWALs/22a1f13b90b0%2C33459%2C1731361667092.1731361704789 2024-11-11T21:48:24,862 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T21:48:24,863 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-11T21:48:24,863 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:48:24,863 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:48:24,863 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:48:24,863 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-11T21:48:24,863 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T21:48:24,863 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=291721344, stopped=false 2024-11-11T21:48:24,863 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=22a1f13b90b0,45953,1731361667024 2024-11-11T21:48:24,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33459-0x10030c1d4250001, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T21:48:24,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T21:48:24,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:48:24,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33459-0x10030c1d4250001, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:48:24,865 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T21:48:24,865 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T21:48:24,865 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:48:24,865 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:48:24,865 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:48:24,866 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33459-0x10030c1d4250001, quorum=127.0.0.1:58844, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:48:24,866 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '22a1f13b90b0,33459,1731361667092' ***** 2024-11-11T21:48:24,866 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T21:48:24,866 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T21:48:24,866 INFO [RS:0;22a1f13b90b0:33459 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T21:48:24,866 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T21:48:24,866 INFO [RS:0;22a1f13b90b0:33459 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-11T21:48:24,866 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.HRegionServer(3091): Received CLOSE for eab9b197412b79d6a3d1daed5218ca45 2024-11-11T21:48:24,866 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.HRegionServer(3091): Received CLOSE for a7500af498a2592feb5bc54d0ae1aea4 2024-11-11T21:48:24,866 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.HRegionServer(959): stopping server 22a1f13b90b0,33459,1731361667092 2024-11-11T21:48:24,866 INFO [RS:0;22a1f13b90b0:33459 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T21:48:24,866 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing eab9b197412b79d6a3d1daed5218ca45, disabling compactions & flushes 2024-11-11T21:48:24,866 INFO [RS:0;22a1f13b90b0:33459 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;22a1f13b90b0:33459. 2024-11-11T21:48:24,866 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45. 2024-11-11T21:48:24,866 DEBUG [RS:0;22a1f13b90b0:33459 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:48:24,866 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45. 2024-11-11T21:48:24,866 DEBUG [RS:0;22a1f13b90b0:33459 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:48:24,866 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45. after waiting 0 ms 2024-11-11T21:48:24,866 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45. 2024-11-11T21:48:24,866 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-11T21:48:24,866 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T21:48:24,866 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T21:48:24,866 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T21:48:24,867 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-11T21:48:24,867 DEBUG [RS:0;22a1f13b90b0:33459 {}] regionserver.HRegionServer(1325): Online Regions={eab9b197412b79d6a3d1daed5218ca45=TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45., 1588230740=hbase:meta,,1.1588230740, a7500af498a2592feb5bc54d0ae1aea4=TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.} 2024-11-11T21:48:24,867 DEBUG [RS:0;22a1f13b90b0:33459 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a7500af498a2592feb5bc54d0ae1aea4, eab9b197412b79d6a3d1daed5218ca45 2024-11-11T21:48:24,867 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T21:48:24,867 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T21:48:24,867 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T21:48:24,867 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T21:48:24,867 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/eab9b197412b79d6a3d1daed5218ca45/info/e2c37e3d2455400c8c5701f98d8429ba.793e534008933f55fa324116a0af01cf->hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/e2c37e3d2455400c8c5701f98d8429ba-bottom] to archive 2024-11-11T21:48:24,867 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T21:48:24,868 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-11T21:48:24,870 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/eab9b197412b79d6a3d1daed5218ca45/info/e2c37e3d2455400c8c5701f98d8429ba.793e534008933f55fa324116a0af01cf to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/eab9b197412b79d6a3d1daed5218ca45/info/e2c37e3d2455400c8c5701f98d8429ba.793e534008933f55fa324116a0af01cf 2024-11-11T21:48:24,870 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=22a1f13b90b0:45953 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-11T21:48:24,870 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-11T21:48:24,872 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-11T21:48:24,873 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T21:48:24,873 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T21:48:24,873 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731361704867Running coprocessor pre-close hooks at 1731361704867Disabling compacts and flushes for region at 1731361704867Disabling writes for close at 1731361704867Writing region close event to WAL at 1731361704869 (+2 ms)Running coprocessor post-close hooks at 1731361704872 (+3 ms)Closed at 1731361704873 (+1 ms) 2024-11-11T21:48:24,873 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T21:48:24,873 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/eab9b197412b79d6a3d1daed5218ca45/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-11-11T21:48:24,874 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45. 
2024-11-11T21:48:24,874 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for eab9b197412b79d6a3d1daed5218ca45: Waiting for close lock at 1731361704866Running coprocessor pre-close hooks at 1731361704866Disabling compacts and flushes for region at 1731361704866Disabling writes for close at 1731361704866Writing region close event to WAL at 1731361704871 (+5 ms)Running coprocessor post-close hooks at 1731361704874 (+3 ms)Closed at 1731361704874 2024-11-11T21:48:24,874 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731361682371.eab9b197412b79d6a3d1daed5218ca45. 2024-11-11T21:48:24,874 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a7500af498a2592feb5bc54d0ae1aea4, disabling compactions & flushes 2024-11-11T21:48:24,874 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4. 2024-11-11T21:48:24,874 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4. 2024-11-11T21:48:24,874 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4. after waiting 0 ms 2024-11-11T21:48:24,874 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4. 
2024-11-11T21:48:24,875 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/e2c37e3d2455400c8c5701f98d8429ba.793e534008933f55fa324116a0af01cf->hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/793e534008933f55fa324116a0af01cf/info/e2c37e3d2455400c8c5701f98d8429ba-top, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-756a858acbd0426781d9a28cc21a5cef, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-cfb2330cc30543f59e1c423942627c96, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/c595a6749dd045d3a8b896ebb5a175f5, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-7deb6f518b2d4c14a9806721768027af, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/f10d78e88e854289ac2903a0b7e831a4, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/a4190a67e9aa4d3c92c65ee90ac7c49e, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/cacb240b39234ab2818917e666334863, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/fb85b9c9dc4b417b960cc079bb8a40f4, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7ab2f587cff4400ab8b78a0be7723065, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/ba1e9794486946b5a738288641c6d00d, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7c08ac76067246fc8756d920d936a176, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/0d4e6284d7704b2da4300e1c514bed3c, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7e48a20978bb43729d8133032664c210, 
hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7fdf7db60ece404194e1bf35acf29a46, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/049755e4c6d64f59a725772fbf5dafa8, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/602d4bb0fd65487ca50f4172d32341a2, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/d3cbf7ea4b7b4efca17d801db40632de, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/94ab2479913543028efa6daa13444164, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/bf050ef918854df185a1ff49b589444d, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/3b68fca3f7e047409bb8cdb250e02895, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/c3204dc86dce4d88a73416b36e4be58a, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/833703f8f95d43c48c5c3b15f0692a05, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/189511dbe24e4f119df56a2537ecf264, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/1dfac7abb4e34bb48b311a8ec055d3f6] to archive 2024-11-11T21:48:24,876 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-11T21:48:24,877 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/e2c37e3d2455400c8c5701f98d8429ba.793e534008933f55fa324116a0af01cf to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/e2c37e3d2455400c8c5701f98d8429ba.793e534008933f55fa324116a0af01cf 2024-11-11T21:48:24,878 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-756a858acbd0426781d9a28cc21a5cef to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-756a858acbd0426781d9a28cc21a5cef 2024-11-11T21:48:24,879 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-cfb2330cc30543f59e1c423942627c96 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-cfb2330cc30543f59e1c423942627c96 2024-11-11T21:48:24,880 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/c595a6749dd045d3a8b896ebb5a175f5 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/c595a6749dd045d3a8b896ebb5a175f5 2024-11-11T21:48:24,882 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-7deb6f518b2d4c14a9806721768027af to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/TestLogRolling-testLogRolling=793e534008933f55fa324116a0af01cf-7deb6f518b2d4c14a9806721768027af 2024-11-11T21:48:24,883 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/f10d78e88e854289ac2903a0b7e831a4 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/f10d78e88e854289ac2903a0b7e831a4 2024-11-11T21:48:24,884 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/a4190a67e9aa4d3c92c65ee90ac7c49e to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/a4190a67e9aa4d3c92c65ee90ac7c49e 2024-11-11T21:48:24,885 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/cacb240b39234ab2818917e666334863 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/cacb240b39234ab2818917e666334863 2024-11-11T21:48:24,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:24,886 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/fb85b9c9dc4b417b960cc079bb8a40f4 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/fb85b9c9dc4b417b960cc079bb8a40f4 2024-11-11T21:48:24,887 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7ab2f587cff4400ab8b78a0be7723065 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7ab2f587cff4400ab8b78a0be7723065 2024-11-11T21:48:24,888 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/ba1e9794486946b5a738288641c6d00d to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/ba1e9794486946b5a738288641c6d00d 2024-11-11T21:48:24,889 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7c08ac76067246fc8756d920d936a176 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7c08ac76067246fc8756d920d936a176 2024-11-11T21:48:24,890 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/0d4e6284d7704b2da4300e1c514bed3c to 
hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/0d4e6284d7704b2da4300e1c514bed3c 2024-11-11T21:48:24,891 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7e48a20978bb43729d8133032664c210 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7e48a20978bb43729d8133032664c210 2024-11-11T21:48:24,892 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7fdf7db60ece404194e1bf35acf29a46 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/7fdf7db60ece404194e1bf35acf29a46 2024-11-11T21:48:24,893 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/049755e4c6d64f59a725772fbf5dafa8 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/049755e4c6d64f59a725772fbf5dafa8 2024-11-11T21:48:24,894 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/602d4bb0fd65487ca50f4172d32341a2 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/602d4bb0fd65487ca50f4172d32341a2 2024-11-11T21:48:24,896 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/d3cbf7ea4b7b4efca17d801db40632de to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/d3cbf7ea4b7b4efca17d801db40632de 2024-11-11T21:48:24,897 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/94ab2479913543028efa6daa13444164 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/94ab2479913543028efa6daa13444164 2024-11-11T21:48:24,898 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/bf050ef918854df185a1ff49b589444d to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/bf050ef918854df185a1ff49b589444d 2024-11-11T21:48:24,899 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/3b68fca3f7e047409bb8cdb250e02895 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/3b68fca3f7e047409bb8cdb250e02895 2024-11-11T21:48:24,899 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/c3204dc86dce4d88a73416b36e4be58a to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/c3204dc86dce4d88a73416b36e4be58a 2024-11-11T21:48:24,900 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/833703f8f95d43c48c5c3b15f0692a05 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/833703f8f95d43c48c5c3b15f0692a05 2024-11-11T21:48:24,901 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/189511dbe24e4f119df56a2537ecf264 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/189511dbe24e4f119df56a2537ecf264 2024-11-11T21:48:24,902 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/1dfac7abb4e34bb48b311a8ec055d3f6 to hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/archive/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/info/1dfac7abb4e34bb48b311a8ec055d3f6 2024-11-11T21:48:24,903 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [c595a6749dd045d3a8b896ebb5a175f5=43081, f10d78e88e854289ac2903a0b7e831a4=12516, a4190a67e9aa4d3c92c65ee90ac7c49e=63733, cacb240b39234ab2818917e666334863=17906, fb85b9c9dc4b417b960cc079bb8a40f4=16828, 7ab2f587cff4400ab8b78a0be7723065=83312, ba1e9794486946b5a738288641c6d00d=12516, 7c08ac76067246fc8756d920d936a176=17906, 0d4e6284d7704b2da4300e1c514bed3c=108216, 7e48a20978bb43729d8133032664c210=16828, 7fdf7db60ece404194e1bf35acf29a46=12516, 049755e4c6d64f59a725772fbf5dafa8=131088, 602d4bb0fd65487ca50f4172d32341a2=20078, d3cbf7ea4b7b4efca17d801db40632de=17906, 94ab2479913543028efa6daa13444164=151749, bf050ef918854df185a1ff49b589444d=12520, 3b68fca3f7e047409bb8cdb250e02895=19013, c3204dc86dce4d88a73416b36e4be58a=178834, 833703f8f95d43c48c5c3b15f0692a05=17918, 189511dbe24e4f119df56a2537ecf264=12523, 1dfac7abb4e34bb48b311a8ec055d3f6=16839] 2024-11-11T21:48:24,906 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/data/default/TestLogRolling-testLogRolling/a7500af498a2592feb5bc54d0ae1aea4/recovered.edits/345.seqid, newMaxSeqId=345, maxSeqId=126 2024-11-11T21:48:24,907 INFO [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4. 2024-11-11T21:48:24,907 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a7500af498a2592feb5bc54d0ae1aea4: Waiting for close lock at 1731361704874Running coprocessor pre-close hooks at 1731361704874Disabling compacts and flushes for region at 1731361704874Disabling writes for close at 1731361704874Writing region close event to WAL at 1731361704903 (+29 ms)Running coprocessor post-close hooks at 1731361704907 (+4 ms)Closed at 1731361704907 2024-11-11T21:48:24,907 DEBUG [RS_CLOSE_REGION-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731361682371.a7500af498a2592feb5bc54d0ae1aea4. 2024-11-11T21:48:25,067 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.HRegionServer(976): stopping server 22a1f13b90b0,33459,1731361667092; all regions closed. 
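
The archival lines above show the store closer moving every compacted-away HFile from the region's data/ directory to the mirrored path under archive/ before the region is closed. The following is an illustrative sketch only of that same src-to-dst move pattern using Hadoop's public FileSystem API; the root, table, and file names are hypothetical and this is not HBase's HFileArchiver code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {
  // Moves one store file from <root>/data/... to the mirrored path under <root>/archive/data/...,
  // the same source -> target shape reported by the HFileArchiver lines above. Illustrative only.
  static boolean archive(FileSystem fs, Path root, Path storeFile) throws IOException {
    Path dataRoot = new Path(root, "data");
    String relative = storeFile.toString().substring(dataRoot.toString().length() + 1);
    Path target = new Path(new Path(root, "archive/data"), relative);
    fs.mkdirs(target.getParent());        // make sure the archive directory exists
    return fs.rename(storeFile, target);  // move rather than copy, leaving data/ clean
  }

  public static void main(String[] args) throws IOException {
    // Hypothetical paths shaped like the ones in the log; not taken from this test run.
    Path root = new Path("hdfs://localhost:38643/user/jenkins/test-data/EXAMPLE");
    Path hfile = new Path(root, "data/default/ExampleTable/region/info/hfile");
    FileSystem fs = root.getFileSystem(new Configuration());
    System.out.println("archived=" + archive(fs, root, hfile));
  }
}

Renaming instead of copying keeps the operation cheap on HDFS and guarantees the file disappears from the live data directory in the same step it appears under archive/.
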
2024-11-11T21:48:25,067 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:25,068 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:25,068 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:25,068 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:25,068 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:25,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741834_1010 (size=8107) 2024-11-11T21:48:25,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741834_1010 (size=8107) 2024-11-11T21:48:25,072 DEBUG [RS:0;22a1f13b90b0:33459 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/oldWALs 2024-11-11T21:48:25,072 INFO [RS:0;22a1f13b90b0:33459 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 22a1f13b90b0%2C33459%2C1731361667092.meta:.meta(num 1731361667941) 2024-11-11T21:48:25,072 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:25,073 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:25,073 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:25,073 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:25,073 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:25,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741881_1057 (size=780) 2024-11-11T21:48:25,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741881_1057 (size=780) 2024-11-11T21:48:25,076 DEBUG [RS:0;22a1f13b90b0:33459 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/oldWALs 2024-11-11T21:48:25,076 INFO [RS:0;22a1f13b90b0:33459 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 22a1f13b90b0%2C33459%2C1731361667092:(num 1731361704852) 2024-11-11T21:48:25,076 DEBUG [RS:0;22a1f13b90b0:33459 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:48:25,076 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T21:48:25,077 INFO [RS:0;22a1f13b90b0:33459 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T21:48:25,077 INFO [RS:0;22a1f13b90b0:33459 {}] hbase.ChoreService(370): Chore service for: regionserver/22a1f13b90b0:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T21:48:25,077 INFO [RS:0;22a1f13b90b0:33459 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T21:48:25,077 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
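
Each "sync.N ... interrupted" line above is a WAL sync worker noticing an interrupt during WAL close and leaving its loop. A generic, self-contained sketch of that blocking-worker shutdown pattern follows; it is not the FSHLog SyncRunner implementation, just the same interrupt-driven exit.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class SyncWorkerSketch {
  public static void main(String[] args) throws InterruptedException {
    BlockingQueue<Runnable> work = new LinkedBlockingQueue<>();
    Thread syncRunner = new Thread(() -> {
      try {
        while (!Thread.currentThread().isInterrupted()) {
          work.take().run();               // blocks until there is something to sync
        }
      } catch (InterruptedException e) {
        // Shutdown path: note the interrupt and exit, like the "interrupted" INFO lines above.
        System.out.println("sync runner interrupted, exiting");
      }
    }, "sync.0");
    syncRunner.start();
    work.put(() -> System.out.println("synced one batch"));
    Thread.sleep(100);      // give the worker a moment to drain the queue (sketch only)
    syncRunner.interrupt(); // what closing the WAL does to each of its sync workers
    syncRunner.join();
  }
}
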
2024-11-11T21:48:25,077 INFO [RS:0;22a1f13b90b0:33459 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33459 2024-11-11T21:48:25,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T21:48:25,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33459-0x10030c1d4250001, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/22a1f13b90b0,33459,1731361667092 2024-11-11T21:48:25,078 INFO [RS:0;22a1f13b90b0:33459 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T21:48:25,079 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [22a1f13b90b0,33459,1731361667092] 2024-11-11T21:48:25,079 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/22a1f13b90b0,33459,1731361667092 already deleted, retry=false 2024-11-11T21:48:25,079 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 22a1f13b90b0,33459,1731361667092 expired; onlineServers=0 2024-11-11T21:48:25,079 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '22a1f13b90b0,45953,1731361667024' ***** 2024-11-11T21:48:25,079 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T21:48:25,080 INFO [M:0;22a1f13b90b0:45953 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T21:48:25,080 INFO [M:0;22a1f13b90b0:45953 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T21:48:25,080 DEBUG [M:0;22a1f13b90b0:45953 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T21:48:25,080 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
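
The ZKWatcher lines above show the master observing the regionserver's ephemeral znode under /hbase/rs disappear (NodeDeleted plus NodeChildrenChanged on the parent), which is what lets RegionServerTracker treat the server as expired and proceed with cluster shutdown. A minimal stand-alone sketch of that ephemeral-node-plus-watch pattern with the plain ZooKeeper client is below; the quorum address and znode names are hypothetical, /hbase/rs is assumed to already exist, and this is not HBase's ZKWatcher code.

import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralServerNodeSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Assumption: a ZooKeeper ensemble is reachable at this address.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, (WatchedEvent event) -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
      if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
        System.out.println("children changed under " + event.getPath());
      }
    });
    connected.await();
    // Watch the parent, then register an ephemeral child for "this" server.
    List<String> servers = zk.getChildren("/hbase/rs", true);
    System.out.println("online servers before: " + servers);
    zk.create("/hbase/rs/example-host,16020,0", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    // Closing the session deletes the ephemeral node; other clients watching the parent
    // then see NodeDeleted / NodeChildrenChanged, the same events logged above.
    zk.close();
  }
}
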
2024-11-11T21:48:25,080 DEBUG [M:0;22a1f13b90b0:45953 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T21:48:25,080 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361667309 {}] cleaner.HFileCleaner(306): Exit Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361667309,5,FailOnTimeoutGroup] 2024-11-11T21:48:25,080 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361667309 {}] cleaner.HFileCleaner(306): Exit Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361667309,5,FailOnTimeoutGroup] 2024-11-11T21:48:25,080 INFO [M:0;22a1f13b90b0:45953 {}] hbase.ChoreService(370): Chore service for: master/22a1f13b90b0:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T21:48:25,080 INFO [M:0;22a1f13b90b0:45953 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T21:48:25,080 DEBUG [M:0;22a1f13b90b0:45953 {}] master.HMaster(1795): Stopping service threads 2024-11-11T21:48:25,080 INFO [M:0;22a1f13b90b0:45953 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T21:48:25,080 INFO [M:0;22a1f13b90b0:45953 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T21:48:25,080 INFO [M:0;22a1f13b90b0:45953 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T21:48:25,080 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T21:48:25,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T21:48:25,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:48:25,081 DEBUG [M:0;22a1f13b90b0:45953 {}] zookeeper.ZKUtil(347): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T21:48:25,081 WARN [M:0;22a1f13b90b0:45953 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T21:48:25,081 INFO [M:0;22a1f13b90b0:45953 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/.lastflushedseqids 2024-11-11T21:48:25,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741882_1058 (size=228) 2024-11-11T21:48:25,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741882_1058 (size=228) 2024-11-11T21:48:25,086 INFO [M:0;22a1f13b90b0:45953 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T21:48:25,086 INFO [M:0;22a1f13b90b0:45953 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T21:48:25,086 DEBUG [M:0;22a1f13b90b0:45953 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T21:48:25,086 INFO [M:0;22a1f13b90b0:45953 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:48:25,086 DEBUG [M:0;22a1f13b90b0:45953 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:48:25,086 DEBUG [M:0;22a1f13b90b0:45953 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T21:48:25,086 DEBUG [M:0;22a1f13b90b0:45953 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:48:25,087 INFO [M:0;22a1f13b90b0:45953 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=53.70 KB heapSize=65.92 KB 2024-11-11T21:48:25,101 DEBUG [M:0;22a1f13b90b0:45953 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/86d6ee2fc54743f1b1886d176961ae54 is 82, key is hbase:meta,,1/info:regioninfo/1731361667978/Put/seqid=0 2024-11-11T21:48:25,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741883_1059 (size=5672) 2024-11-11T21:48:25,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741883_1059 (size=5672) 2024-11-11T21:48:25,106 INFO [M:0;22a1f13b90b0:45953 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/86d6ee2fc54743f1b1886d176961ae54 2024-11-11T21:48:25,124 DEBUG [M:0;22a1f13b90b0:45953 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/92f36f4a1ae6445b957cdc02cd92e605 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731361668411/Put/seqid=0 2024-11-11T21:48:25,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741884_1060 (size=7680) 2024-11-11T21:48:25,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741884_1060 (size=7680) 2024-11-11T21:48:25,129 INFO [M:0;22a1f13b90b0:45953 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.09 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/92f36f4a1ae6445b957cdc02cd92e605 2024-11-11T21:48:25,133 INFO [M:0;22a1f13b90b0:45953 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 92f36f4a1ae6445b957cdc02cd92e605 2024-11-11T21:48:25,148 DEBUG [M:0;22a1f13b90b0:45953 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/af3a0639a0e24723b6aef1757126677f is 69, key is 22a1f13b90b0,33459,1731361667092/rs:state/1731361667340/Put/seqid=0 2024-11-11T21:48:25,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741885_1061 (size=5156) 2024-11-11T21:48:25,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741885_1061 (size=5156) 2024-11-11T21:48:25,153 INFO [M:0;22a1f13b90b0:45953 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/af3a0639a0e24723b6aef1757126677f 2024-11-11T21:48:25,170 DEBUG [M:0;22a1f13b90b0:45953 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/72018afb3e4542baa2bb7189b1717fa9 is 52, key is load_balancer_on/state:d/1731361668027/Put/seqid=0 2024-11-11T21:48:25,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741886_1062 (size=5056) 2024-11-11T21:48:25,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741886_1062 (size=5056) 2024-11-11T21:48:25,175 INFO [M:0;22a1f13b90b0:45953 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/72018afb3e4542baa2bb7189b1717fa9 2024-11-11T21:48:25,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33459-0x10030c1d4250001, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:48:25,179 INFO [RS:0;22a1f13b90b0:33459 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T21:48:25,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33459-0x10030c1d4250001, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:48:25,180 INFO [RS:0;22a1f13b90b0:33459 {}] regionserver.HRegionServer(1031): Exiting; stopping=22a1f13b90b0,33459,1731361667092; zookeeper connection closed. 
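
The flush above writes each column family's snapshot into a file under the store's .tmp directory, and the "Committing ... as ..." lines that follow publish those files by renaming them into the live store directory. A bare-bones sketch of that write-then-rename publish pattern on a Hadoop FileSystem follows; the paths and payload are hypothetical, and the real flush goes through HFile writers and StoreFlusherImpl rather than raw byte writes.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommitSketch {
  // Writes the new file under .tmp first, then renames it into place so readers
  // only ever see complete files, the idea behind the "Committing ... as ..." lines above.
  static Path flushAndCommit(FileSystem fs, Path storeDir, String fileName, byte[] payload)
      throws IOException {
    Path tmp = new Path(new Path(storeDir, ".tmp"), fileName);
    Path live = new Path(storeDir, fileName);
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(payload);                 // in HBase this would be HFile blocks, not raw bytes
    }
    if (!fs.rename(tmp, live)) {
      throw new IOException("rename failed: " + tmp + " -> " + live);
    }
    return live;
  }

  public static void main(String[] args) throws IOException {
    // Hypothetical store directory; not taken from this test run.
    Path store = new Path("hdfs://localhost:38643/user/jenkins/test-data/EXAMPLE/info");
    FileSystem fs = store.getFileSystem(new Configuration());
    System.out.println("committed " + flushAndCommit(fs, store, "flushed-file", new byte[]{1, 2, 3}));
  }
}
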
2024-11-11T21:48:25,180 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7e5dd0ea {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7e5dd0ea 2024-11-11T21:48:25,180 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T21:48:25,180 DEBUG [M:0;22a1f13b90b0:45953 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/86d6ee2fc54743f1b1886d176961ae54 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/86d6ee2fc54743f1b1886d176961ae54 2024-11-11T21:48:25,185 INFO [M:0;22a1f13b90b0:45953 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/86d6ee2fc54743f1b1886d176961ae54, entries=8, sequenceid=129, filesize=5.5 K 2024-11-11T21:48:25,186 DEBUG [M:0;22a1f13b90b0:45953 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/92f36f4a1ae6445b957cdc02cd92e605 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/92f36f4a1ae6445b957cdc02cd92e605 2024-11-11T21:48:25,190 INFO [M:0;22a1f13b90b0:45953 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 92f36f4a1ae6445b957cdc02cd92e605 2024-11-11T21:48:25,190 INFO [M:0;22a1f13b90b0:45953 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/92f36f4a1ae6445b957cdc02cd92e605, entries=14, sequenceid=129, filesize=7.5 K 2024-11-11T21:48:25,191 DEBUG [M:0;22a1f13b90b0:45953 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/af3a0639a0e24723b6aef1757126677f as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/af3a0639a0e24723b6aef1757126677f 2024-11-11T21:48:25,195 INFO [M:0;22a1f13b90b0:45953 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/af3a0639a0e24723b6aef1757126677f, entries=1, sequenceid=129, filesize=5.0 K 2024-11-11T21:48:25,196 DEBUG [M:0;22a1f13b90b0:45953 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/72018afb3e4542baa2bb7189b1717fa9 as hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/72018afb3e4542baa2bb7189b1717fa9 2024-11-11T21:48:25,200 INFO [M:0;22a1f13b90b0:45953 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38643/user/jenkins/test-data/b9d076d1-4105-e8fc-9a97-b777780b1381/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/72018afb3e4542baa2bb7189b1717fa9, entries=1, sequenceid=129, filesize=4.9 K 2024-11-11T21:48:25,201 INFO [M:0;22a1f13b90b0:45953 {}] regionserver.HRegion(3140): Finished flush of dataSize ~53.70 KB/54985, heapSize ~65.86 KB/67440, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=129, compaction requested=false 2024-11-11T21:48:25,202 INFO [M:0;22a1f13b90b0:45953 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:48:25,202 DEBUG [M:0;22a1f13b90b0:45953 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731361705086Disabling compacts and flushes for region at 1731361705086Disabling writes for close at 1731361705086Obtaining lock to block concurrent updates at 1731361705087 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731361705087Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=54985, getHeapSize=67440, getOffHeapSize=0, getCellsCount=152 at 1731361705087Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731361705087Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731361705088 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731361705101 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731361705101Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731361705110 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731361705124 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731361705124Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731361705133 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731361705147 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731361705147Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731361705157 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731361705170 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731361705170Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49b3523e: reopening flushed file at 1731361705179 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@372ef281: reopening flushed file at 1731361705185 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@347813d6: reopening flushed file at 1731361705190 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5b1c914c: reopening flushed file at 1731361705195 (+5 ms)Finished flush of dataSize ~53.70 KB/54985, heapSize ~65.86 KB/67440, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=129, compaction requested=false at 1731361705201 (+6 ms)Writing region close event to WAL at 1731361705202 (+1 ms)Closed at 1731361705202 2024-11-11T21:48:25,203 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:25,203 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:25,203 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:25,203 INFO [sync.3 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:25,203 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:25,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43361 is added to blk_1073741830_1006 (size=63915) 2024-11-11T21:48:25,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36647 is added to blk_1073741830_1006 (size=63915) 2024-11-11T21:48:25,205 INFO [M:0;22a1f13b90b0:45953 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-11T21:48:25,205 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T21:48:25,205 INFO [M:0;22a1f13b90b0:45953 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45953 2024-11-11T21:48:25,205 INFO [M:0;22a1f13b90b0:45953 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T21:48:25,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:48:25,306 INFO [M:0;22a1f13b90b0:45953 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T21:48:25,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45953-0x10030c1d4250000, quorum=127.0.0.1:58844, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:48:25,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@9612b29{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:48:25,313 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@314e7370{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:48:25,313 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:48:25,313 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1dab95de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:48:25,313 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7305dd28{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/hadoop.log.dir/,STOPPED} 2024-11-11T21:48:25,316 WARN [BP-1724013944-172.17.0.2-1731361666251 heartbeating to localhost/127.0.0.1:38643 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T21:48:25,316 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
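
The region close journals earlier in this section record each step with its absolute timestamp plus a "(+N ms)" delta from the previous step, which makes the slow step easy to spot at a glance. The small recorder below reproduces that journal shape purely for illustration; HBase assembles these strings through its own region close bookkeeping, not through a class like this.

import java.util.ArrayList;
import java.util.List;

public class StepJournalSketch {
  private final List<String> steps = new ArrayList<>();
  private long lastMillis = -1;

  // Record a named step; after the first step, append the "(+N ms)" delta like the journals above.
  void step(String what) {
    long now = System.currentTimeMillis();
    String entry = what + " at " + now;
    if (lastMillis >= 0) {
      entry += " (+" + (now - lastMillis) + " ms)";
    }
    lastMillis = now;
    steps.add(entry);
  }

  // The log runs journal entries together with no separator, so join with the empty string.
  String journal() {
    return String.join("", steps);
  }

  public static void main(String[] args) throws InterruptedException {
    StepJournalSketch j = new StepJournalSketch();
    j.step("Waiting for close lock");
    j.step("Disabling compacts and flushes for region");
    Thread.sleep(5);
    j.step("Writing region close event to WAL");
    j.step("Closed");
    System.out.println("Region close journal: " + j.journal());
  }
}
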
2024-11-11T21:48:25,316 WARN [BP-1724013944-172.17.0.2-1731361666251 heartbeating to localhost/127.0.0.1:38643 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1724013944-172.17.0.2-1731361666251 (Datanode Uuid a86f15ed-e888-4cc1-907f-709446a36319) service to localhost/127.0.0.1:38643 2024-11-11T21:48:25,316 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T21:48:25,316 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/cluster_4180ed5a-cb94-18b5-86e8-951533b2a76c/data/data3/current/BP-1724013944-172.17.0.2-1731361666251 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:48:25,317 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/cluster_4180ed5a-cb94-18b5-86e8-951533b2a76c/data/data4/current/BP-1724013944-172.17.0.2-1731361666251 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:48:25,317 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T21:48:25,319 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@45890504{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:48:25,319 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3d639fc0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:48:25,319 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:48:25,319 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@53cff5cb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:48:25,319 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a48d3d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/hadoop.log.dir/,STOPPED} 2024-11-11T21:48:25,321 WARN [BP-1724013944-172.17.0.2-1731361666251 heartbeating to localhost/127.0.0.1:38643 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T21:48:25,321 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T21:48:25,321 WARN [BP-1724013944-172.17.0.2-1731361666251 heartbeating to localhost/127.0.0.1:38643 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1724013944-172.17.0.2-1731361666251 (Datanode Uuid 462bc2be-9f40-4149-b1c2-b792bfaa9f15) service to localhost/127.0.0.1:38643 2024-11-11T21:48:25,321 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T21:48:25,321 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/cluster_4180ed5a-cb94-18b5-86e8-951533b2a76c/data/data1/current/BP-1724013944-172.17.0.2-1731361666251 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:48:25,322 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/cluster_4180ed5a-cb94-18b5-86e8-951533b2a76c/data/data2/current/BP-1724013944-172.17.0.2-1731361666251 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:48:25,322 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T21:48:25,327 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@52d230c9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T21:48:25,328 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@240fc28c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:48:25,328 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:48:25,328 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56433553{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:48:25,328 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10ce7a76{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/hadoop.log.dir/,STOPPED} 2024-11-11T21:48:25,335 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T21:48:25,363 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T21:48:25,371 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=231 (was 207) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:38643 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38643 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38643 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:38643 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:38643 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38643 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38643 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:38643 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=517 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=101 (was 50) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=4198 (was 5276) 2024-11-11T21:48:25,375 INFO [regionserver/22a1f13b90b0:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T21:48:25,378 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=231, OpenFileDescriptor=517, MaxFileDescriptor=1048576, SystemLoadAverage=101, ProcessCount=11, AvailableMemoryMB=4198 2024-11-11T21:48:25,378 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T21:48:25,378 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/hadoop.log.dir so I do NOT create it in target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2 2024-11-11T21:48:25,378 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/50339f40-5804-6a50-dc0b-928acb431703/hadoop.tmp.dir so I do NOT create it in target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2 2024-11-11T21:48:25,378 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/cluster_6a94c650-af49-558c-0e72-4f7936a29eba, deleteOnExit=true 2024-11-11T21:48:25,378 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T21:48:25,379 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/test.cache.data in system properties and HBase conf 2024-11-11T21:48:25,379 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T21:48:25,379 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/hadoop.log.dir in system properties and HBase conf 2024-11-11T21:48:25,379 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T21:48:25,379 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T21:48:25,379 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T21:48:25,379 DEBUG [Time-limited test {}] 
fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-11T21:48:25,379 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T21:48:25,379 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T21:48:25,379 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T21:48:25,379 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T21:48:25,379 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T21:48:25,379 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T21:48:25,380 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T21:48:25,380 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T21:48:25,380 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T21:48:25,380 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/nfs.dump.dir in system properties and HBase conf 2024-11-11T21:48:25,380 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/java.io.tmpdir in system properties and HBase conf 2024-11-11T21:48:25,380 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T21:48:25,380 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T21:48:25,380 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T21:48:25,394 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T21:48:25,502 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:48:25,505 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:48:25,506 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:48:25,506 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:48:25,506 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T21:48:25,507 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:48:25,507 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@412c5a2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:48:25,507 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5179a3de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:48:25,599 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@651fb028{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/java.io.tmpdir/jetty-localhost-44935-hadoop-hdfs-3_4_1-tests_jar-_-any-18233318354085451678/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T21:48:25,599 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@62daa233{HTTP/1.1, (http/1.1)}{localhost:44935} 2024-11-11T21:48:25,599 INFO [Time-limited test {}] server.Server(415): Started @279999ms 2024-11-11T21:48:25,610 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-11T21:48:25,646 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:48:25,648 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:48:25,649 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:48:25,649 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:48:25,649 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T21:48:25,650 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10c1adfc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:48:25,650 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@470d812e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:48:25,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:25,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:25,747 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@47f122ec{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/java.io.tmpdir/jetty-localhost-38261-hadoop-hdfs-3_4_1-tests_jar-_-any-9378865990387694605/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:48:25,747 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@309e84ad{HTTP/1.1, (http/1.1)}{localhost:38261} 2024-11-11T21:48:25,747 INFO [Time-limited test {}] server.Server(415): Started @280147ms 2024-11-11T21:48:25,748 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T21:48:25,774 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T21:48:25,776 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T21:48:25,776 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T21:48:25,777 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T21:48:25,777 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T21:48:25,777 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1926aa54{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/hadoop.log.dir/,AVAILABLE} 2024-11-11T21:48:25,777 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d88d0b5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T21:48:25,802 WARN [Thread-2489 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/cluster_6a94c650-af49-558c-0e72-4f7936a29eba/data/data1/current/BP-1938071021-172.17.0.2-1731361705396/current, will proceed with Du for space computation calculation, 2024-11-11T21:48:25,802 WARN [Thread-2490 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/cluster_6a94c650-af49-558c-0e72-4f7936a29eba/data/data2/current/BP-1938071021-172.17.0.2-1731361705396/current, will proceed with Du for space computation calculation, 2024-11-11T21:48:25,816 WARN [Thread-2468 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T21:48:25,818 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcd110122b6c428d2 with lease ID 0xef90c1ee7a305af1: Processing first storage report for DS-9566f0f9-51ed-49ef-aed4-208243847b8d from datanode DatanodeRegistration(127.0.0.1:42665, datanodeUuid=0d28cb86-5447-4b88-bb92-0260488402df, infoPort=42325, infoSecurePort=0, ipcPort=37807, storageInfo=lv=-57;cid=testClusterID;nsid=1973046819;c=1731361705396) 2024-11-11T21:48:25,818 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcd110122b6c428d2 with lease ID 0xef90c1ee7a305af1: from storage DS-9566f0f9-51ed-49ef-aed4-208243847b8d node DatanodeRegistration(127.0.0.1:42665, datanodeUuid=0d28cb86-5447-4b88-bb92-0260488402df, infoPort=42325, infoSecurePort=0, ipcPort=37807, storageInfo=lv=-57;cid=testClusterID;nsid=1973046819;c=1731361705396), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:48:25,818 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcd110122b6c428d2 with lease ID 0xef90c1ee7a305af1: Processing first storage report for DS-a0c97f05-df4b-4992-83a6-87eb87b4bcd7 from datanode DatanodeRegistration(127.0.0.1:42665, datanodeUuid=0d28cb86-5447-4b88-bb92-0260488402df, infoPort=42325, infoSecurePort=0, ipcPort=37807, storageInfo=lv=-57;cid=testClusterID;nsid=1973046819;c=1731361705396) 2024-11-11T21:48:25,818 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcd110122b6c428d2 with lease ID 0xef90c1ee7a305af1: from storage DS-a0c97f05-df4b-4992-83a6-87eb87b4bcd7 node DatanodeRegistration(127.0.0.1:42665, datanodeUuid=0d28cb86-5447-4b88-bb92-0260488402df, infoPort=42325, infoSecurePort=0, ipcPort=37807, storageInfo=lv=-57;cid=testClusterID;nsid=1973046819;c=1731361705396), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:48:25,875 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35c58925{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/java.io.tmpdir/jetty-localhost-45425-hadoop-hdfs-3_4_1-tests_jar-_-any-1050352694406239887/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:48:25,875 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@265f6a26{HTTP/1.1, (http/1.1)}{localhost:45425} 2024-11-11T21:48:25,875 INFO [Time-limited test {}] server.Server(415): Started @280275ms 2024-11-11T21:48:25,876 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T21:48:25,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:25,942 WARN [Thread-2516 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/cluster_6a94c650-af49-558c-0e72-4f7936a29eba/data/data4/current/BP-1938071021-172.17.0.2-1731361705396/current, will proceed with Du for space computation calculation, 2024-11-11T21:48:25,942 WARN [Thread-2515 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/cluster_6a94c650-af49-558c-0e72-4f7936a29eba/data/data3/current/BP-1938071021-172.17.0.2-1731361705396/current, will proceed with Du for space computation calculation, 2024-11-11T21:48:25,957 WARN [Thread-2504 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T21:48:25,959 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x66bba348378cb2aa with lease ID 0xef90c1ee7a305af2: Processing first storage report for DS-db210c41-30d9-4e9f-b1e0-84121809617e from datanode DatanodeRegistration(127.0.0.1:40523, datanodeUuid=fed9ebd1-7986-4d5d-a1f6-3d5ac616a0a2, infoPort=34925, infoSecurePort=0, ipcPort=39903, storageInfo=lv=-57;cid=testClusterID;nsid=1973046819;c=1731361705396) 2024-11-11T21:48:25,959 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x66bba348378cb2aa with lease ID 0xef90c1ee7a305af2: from storage DS-db210c41-30d9-4e9f-b1e0-84121809617e node DatanodeRegistration(127.0.0.1:40523, datanodeUuid=fed9ebd1-7986-4d5d-a1f6-3d5ac616a0a2, infoPort=34925, infoSecurePort=0, ipcPort=39903, storageInfo=lv=-57;cid=testClusterID;nsid=1973046819;c=1731361705396), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:48:25,959 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x66bba348378cb2aa with lease ID 0xef90c1ee7a305af2: Processing first storage report for DS-3174d6ee-76d6-49d6-8e2d-85d45f1cb432 from datanode DatanodeRegistration(127.0.0.1:40523, datanodeUuid=fed9ebd1-7986-4d5d-a1f6-3d5ac616a0a2, infoPort=34925, infoSecurePort=0, ipcPort=39903, storageInfo=lv=-57;cid=testClusterID;nsid=1973046819;c=1731361705396) 2024-11-11T21:48:25,959 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x66bba348378cb2aa with lease ID 0xef90c1ee7a305af2: from storage DS-3174d6ee-76d6-49d6-8e2d-85d45f1cb432 node DatanodeRegistration(127.0.0.1:40523, datanodeUuid=fed9ebd1-7986-4d5d-a1f6-3d5ac616a0a2, infoPort=34925, infoSecurePort=0, ipcPort=39903, storageInfo=lv=-57;cid=testClusterID;nsid=1973046819;c=1731361705396), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T21:48:25,999 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2 2024-11-11T21:48:26,002 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/cluster_6a94c650-af49-558c-0e72-4f7936a29eba/zookeeper_0, clientPort=50978, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/cluster_6a94c650-af49-558c-0e72-4f7936a29eba/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/cluster_6a94c650-af49-558c-0e72-4f7936a29eba/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T21:48:26,002 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50978 2024-11-11T21:48:26,003 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:48:26,004 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:48:26,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42665 is added to blk_1073741825_1001 (size=7) 2024-11-11T21:48:26,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40523 is added to blk_1073741825_1001 (size=7) 2024-11-11T21:48:26,015 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e with version=8 2024-11-11T21:48:26,015 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42807/user/jenkins/test-data/c0989a9c-fee9-1f84-de4b-5bcd997990e0/hbase-staging 2024-11-11T21:48:26,016 INFO [Time-limited test {}] client.ConnectionUtils(128): master/22a1f13b90b0:0 server-side Connection retries=45 2024-11-11T21:48:26,016 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:48:26,016 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T21:48:26,016 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T21:48:26,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:48:26,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T21:48:26,017 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T21:48:26,017 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T21:48:26,017 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45553 2024-11-11T21:48:26,018 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45553 connecting to ZooKeeper ensemble=127.0.0.1:50978 2024-11-11T21:48:26,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:455530x0, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T21:48:26,022 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45553-0x10030c26c800000 connected 2024-11-11T21:48:26,033 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:48:26,034 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:48:26,036 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:48:26,036 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e, hbase.cluster.distributed=false 2024-11-11T21:48:26,038 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T21:48:26,038 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45553 2024-11-11T21:48:26,038 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45553 2024-11-11T21:48:26,039 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45553 2024-11-11T21:48:26,039 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45553 2024-11-11T21:48:26,039 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45553 2024-11-11T21:48:26,054 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/22a1f13b90b0:0 server-side Connection retries=45 2024-11-11T21:48:26,054 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:48:26,054 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T21:48:26,054 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T21:48:26,054 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T21:48:26,055 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T21:48:26,055 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T21:48:26,055 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T21:48:26,055 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37749 2024-11-11T21:48:26,056 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37749 connecting to ZooKeeper ensemble=127.0.0.1:50978 2024-11-11T21:48:26,056 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:48:26,058 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:48:26,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:377490x0, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T21:48:26,061 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37749-0x10030c26c800001 connected 2024-11-11T21:48:26,061 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:48:26,061 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T21:48:26,061 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T21:48:26,062 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T21:48:26,063 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T21:48:26,063 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37749 2024-11-11T21:48:26,063 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37749 2024-11-11T21:48:26,063 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37749 2024-11-11T21:48:26,064 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37749 2024-11-11T21:48:26,064 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37749 2024-11-11T21:48:26,075 DEBUG [M:0;22a1f13b90b0:45553 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;22a1f13b90b0:45553 2024-11-11T21:48:26,076 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/22a1f13b90b0,45553,1731361706016 2024-11-11T21:48:26,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:48:26,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:48:26,077 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/22a1f13b90b0,45553,1731361706016 2024-11-11T21:48:26,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T21:48:26,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:48:26,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:48:26,078 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T21:48:26,078 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/22a1f13b90b0,45553,1731361706016 from backup master directory 2024-11-11T21:48:26,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/22a1f13b90b0,45553,1731361706016 2024-11-11T21:48:26,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:48:26,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T21:48:26,079 WARN [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
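[Annotation] The records from the ResourceChecker snapshot down to this point trace a fresh mini-cluster bootstrap for testLogRollOnNothingWritten: HDFS and ZooKeeper come up, the master and region server bind their Netty RPC servers, and the master starts claiming the active role through ZooKeeper (continued in the records that follow). In test code this sequence is normally driven by HBaseTestingUtil with the option object printed earlier (StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...}). A minimal sketch of such a driver, assuming the hbase-testing-util API named in the log; the class name and test body here are hypothetical and not taken from TestLogRolling:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterBootstrapSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirrors the option the log prints: 1 master, 1 region server, 2 data nodes,
    // 1 ZooKeeper server, and no pre-created root/WAL directories.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .createRootDir(false)
        .createWALDir(false)
        .build();
    util.startMiniCluster(option);   // brings up DFS, ZooKeeper, the HMaster and the region server
    try {
      // test logic would go here, e.g. against util.getConnection()
    } finally {
      util.shutdownMiniCluster();    // tears the whole mini-cluster back down
    }
  }
}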
2024-11-11T21:48:26,079 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=22a1f13b90b0,45553,1731361706016 2024-11-11T21:48:26,085 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/hbase.id] with ID: 6955dac4-baf4-4707-aea7-fda8e55e2671 2024-11-11T21:48:26,085 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/.tmp/hbase.id 2024-11-11T21:48:26,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40523 is added to blk_1073741826_1002 (size=42) 2024-11-11T21:48:26,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42665 is added to blk_1073741826_1002 (size=42) 2024-11-11T21:48:26,093 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/.tmp/hbase.id]:[hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/hbase.id] 2024-11-11T21:48:26,103 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:48:26,103 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T21:48:26,104 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
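[Annotation] The FSUtils records just above show the cluster ID being established with a write-to-temp-then-rename pattern: hbase.id is first written under .tmp and then moved to its final location, so readers never observe a partially written file. A minimal illustration of that pattern with the plain Hadoop FileSystem API; this is a sketch of the pattern only, with placeholder paths and a raw-string payload, not the actual FSUtils implementation:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  static void writeClusterId(FileSystem fs, Path rootDir, String clusterId) throws IOException {
    Path tmp = new Path(rootDir, ".tmp/hbase.id");   // temporary location, as in the FSUtils(625) record
    Path dst = new Path(rootDir, "hbase.id");        // final location, as in the FSUtils(620) record
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, dst)) {                      // move into place, as in the FSUtils(634) record
      throw new IOException("Failed to move " + tmp + " to " + dst);
    }
  }

  public static void main(String[] args) throws Exception {
    // Any writable FileSystem works for the sketch; the log uses hdfs://localhost:44645/...
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    writeClusterId(fs, new Path("/tmp/sketch-rootdir"), java.util.UUID.randomUUID().toString());
  }
}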
2024-11-11T21:48:26,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:48:26,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:48:26,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42665 is added to blk_1073741827_1003 (size=196) 2024-11-11T21:48:26,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40523 is added to blk_1073741827_1003 (size=196) 2024-11-11T21:48:26,113 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T21:48:26,114 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T21:48:26,114 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:48:26,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40523 is added to blk_1073741828_1004 (size=1189) 2024-11-11T21:48:26,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42665 is added to blk_1073741828_1004 (size=1189) 2024-11-11T21:48:26,121 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store 2024-11-11T21:48:26,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40523 is added to blk_1073741829_1005 (size=34) 2024-11-11T21:48:26,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42665 is added to blk_1073741829_1005 (size=34) 2024-11-11T21:48:26,126 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:48:26,126 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T21:48:26,126 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:48:26,126 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:48:26,126 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T21:48:26,127 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:48:26,127 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
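[Annotation] The descriptor dumped above for 'master:store' (column families info, proc, rs and state, with ROW_INDEX_V1 encoding, a ROWCOL bloom filter and an 8 KB block size only on info) reads like any other HBase table descriptor. For orientation, an equivalent descriptor could be assembled with the public client API roughly as below; this is a sketch of the 'info' family only, with attribute values copied from the log, and is not the master's actual code path:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .build())
        // The 'proc', 'rs' and 'state' families (1 version, no encoding, ROW bloom filter,
        // 64 KB blocks in the log) would be added the same way; omitted to keep the sketch short.
        .build();
  }

  public static void main(String[] args) {
    System.out.println(build());
  }
}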
2024-11-11T21:48:26,127 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731361706126Disabling compacts and flushes for region at 1731361706126Disabling writes for close at 1731361706127 (+1 ms)Writing region close event to WAL at 1731361706127Closed at 1731361706127 2024-11-11T21:48:26,127 WARN [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/.initializing 2024-11-11T21:48:26,127 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/WALs/22a1f13b90b0,45553,1731361706016 2024-11-11T21:48:26,129 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C45553%2C1731361706016, suffix=, logDir=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/WALs/22a1f13b90b0,45553,1731361706016, archiveDir=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/oldWALs, maxLogs=10 2024-11-11T21:48:26,130 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C45553%2C1731361706016.1731361706129 2024-11-11T21:48:26,133 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/WALs/22a1f13b90b0,45553,1731361706016/22a1f13b90b0%2C45553%2C1731361706016.1731361706129 2024-11-11T21:48:26,134 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34925:34925),(127.0.0.1/127.0.0.1:42325:42325)] 2024-11-11T21:48:26,134 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T21:48:26,135 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:48:26,135 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:48:26,135 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:48:26,136 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:48:26,137 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T21:48:26,137 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:48:26,137 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:48:26,137 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:48:26,138 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T21:48:26,138 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:48:26,139 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:48:26,139 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:48:26,139 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T21:48:26,139 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:48:26,140 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:48:26,140 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:48:26,140 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T21:48:26,141 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:48:26,141 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T21:48:26,141 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:48:26,141 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:48:26,142 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:48:26,142 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:48:26,142 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:48:26,143 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T21:48:26,144 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T21:48:26,145 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T21:48:26,146 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=701094, jitterRate=-0.10851305723190308}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T21:48:26,146 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731361706135Initializing all the Stores at 1731361706135Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361706135Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361706136 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361706136Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361706136Cleaning up temporary data from old regions at 1731361706142 (+6 ms)Region opened successfully at 1731361706146 (+4 ms) 2024-11-11T21:48:26,146 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T21:48:26,149 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b26fb6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=22a1f13b90b0/172.17.0.2:0 2024-11-11T21:48:26,150 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T21:48:26,150 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T21:48:26,150 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T21:48:26,150 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T21:48:26,151 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T21:48:26,151 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T21:48:26,151 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T21:48:26,153 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T21:48:26,154 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T21:48:26,154 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T21:48:26,155 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T21:48:26,155 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T21:48:26,156 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T21:48:26,156 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T21:48:26,157 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T21:48:26,158 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T21:48:26,158 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T21:48:26,159 DEBUG 
[master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T21:48:26,161 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T21:48:26,161 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T21:48:26,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T21:48:26,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T21:48:26,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:48:26,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:48:26,163 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=22a1f13b90b0,45553,1731361706016, sessionid=0x10030c26c800000, setting cluster-up flag (Was=false) 2024-11-11T21:48:26,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:48:26,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:48:26,166 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T21:48:26,167 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=22a1f13b90b0,45553,1731361706016 2024-11-11T21:48:26,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:48:26,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:48:26,171 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T21:48:26,172 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=22a1f13b90b0,45553,1731361706016 2024-11-11T21:48:26,173 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T21:48:26,175 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T21:48:26,175 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T21:48:26,175 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T21:48:26,175 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 22a1f13b90b0,45553,1731361706016 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T21:48:26,177 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:48:26,177 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:48:26,177 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:48:26,177 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=5, maxPoolSize=5 2024-11-11T21:48:26,177 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/22a1f13b90b0:0, corePoolSize=10, maxPoolSize=10 2024-11-11T21:48:26,177 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:48:26,177 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=2, maxPoolSize=2 2024-11-11T21:48:26,177 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/22a1f13b90b0:0, corePoolSize=1, 
maxPoolSize=1 2024-11-11T21:48:26,181 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731361736181 2024-11-11T21:48:26,181 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T21:48:26,181 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T21:48:26,181 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T21:48:26,181 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T21:48:26,181 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T21:48:26,181 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:48:26,181 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T21:48:26,181 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T21:48:26,182 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:48:26,182 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-11T21:48:26,182 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T21:48:26,182 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T21:48:26,183 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T21:48:26,183 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T21:48:26,183 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T21:48:26,183 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T21:48:26,183 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361706183,5,FailOnTimeoutGroup] 2024-11-11T21:48:26,183 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361706183,5,FailOnTimeoutGroup] 2024-11-11T21:48:26,183 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T21:48:26,183 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
2024-11-11T21:48:26,184 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T21:48:26,184 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-11T21:48:26,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40523 is added to blk_1073741831_1007 (size=1321) 2024-11-11T21:48:26,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42665 is added to blk_1073741831_1007 (size=1321) 2024-11-11T21:48:26,192 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T21:48:26,192 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e 2024-11-11T21:48:26,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42665 is added to blk_1073741832_1008 (size=32) 2024-11-11T21:48:26,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40523 is added to blk_1073741832_1008 (size=32) 2024-11-11T21:48:26,199 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:48:26,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T21:48:26,201 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T21:48:26,201 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:48:26,201 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:48:26,201 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T21:48:26,202 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T21:48:26,202 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:48:26,202 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:48:26,202 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T21:48:26,203 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; 
major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T21:48:26,203 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:48:26,203 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:48:26,203 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T21:48:26,204 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T21:48:26,204 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:48:26,205 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:48:26,205 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T21:48:26,205 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/data/hbase/meta/1588230740 2024-11-11T21:48:26,206 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/data/hbase/meta/1588230740 2024-11-11T21:48:26,207 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T21:48:26,207 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T21:48:26,207 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-11T21:48:26,208 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T21:48:26,210 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T21:48:26,210 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=770979, jitterRate=-0.01964990794658661}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T21:48:26,210 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731361706199Initializing all the Stores at 1731361706199Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361706199Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361706199Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361706199Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361706199Cleaning up temporary data from old regions at 1731361706207 (+8 ms)Region opened successfully at 1731361706210 (+3 ms) 2024-11-11T21:48:26,210 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T21:48:26,210 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T21:48:26,210 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T21:48:26,210 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T21:48:26,211 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T21:48:26,211 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T21:48:26,211 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731361706210Disabling compacts and flushes for region at 1731361706210Disabling writes for close at 1731361706211 (+1 ms)Writing region close 
event to WAL at 1731361706211Closed at 1731361706211 2024-11-11T21:48:26,212 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:48:26,212 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T21:48:26,212 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T21:48:26,213 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T21:48:26,213 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T21:48:26,265 INFO [RS:0;22a1f13b90b0:37749 {}] regionserver.HRegionServer(746): ClusterId : 6955dac4-baf4-4707-aea7-fda8e55e2671 2024-11-11T21:48:26,265 DEBUG [RS:0;22a1f13b90b0:37749 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T21:48:26,267 DEBUG [RS:0;22a1f13b90b0:37749 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T21:48:26,267 DEBUG [RS:0;22a1f13b90b0:37749 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T21:48:26,269 DEBUG [RS:0;22a1f13b90b0:37749 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T21:48:26,269 DEBUG [RS:0;22a1f13b90b0:37749 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62103441, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=22a1f13b90b0/172.17.0.2:0 2024-11-11T21:48:26,283 DEBUG [RS:0;22a1f13b90b0:37749 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;22a1f13b90b0:37749 2024-11-11T21:48:26,283 INFO [RS:0;22a1f13b90b0:37749 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T21:48:26,283 INFO [RS:0;22a1f13b90b0:37749 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T21:48:26,283 DEBUG [RS:0;22a1f13b90b0:37749 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-11T21:48:26,283 INFO [RS:0;22a1f13b90b0:37749 {}] regionserver.HRegionServer(2659): reportForDuty to master=22a1f13b90b0,45553,1731361706016 with port=37749, startcode=1731361706054 2024-11-11T21:48:26,284 DEBUG [RS:0;22a1f13b90b0:37749 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T21:48:26,285 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57499, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T21:48:26,286 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45553 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 22a1f13b90b0,37749,1731361706054 2024-11-11T21:48:26,286 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45553 {}] master.ServerManager(517): Registering regionserver=22a1f13b90b0,37749,1731361706054 2024-11-11T21:48:26,287 DEBUG [RS:0;22a1f13b90b0:37749 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e 2024-11-11T21:48:26,287 DEBUG [RS:0;22a1f13b90b0:37749 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44645 2024-11-11T21:48:26,287 DEBUG [RS:0;22a1f13b90b0:37749 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T21:48:26,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T21:48:26,288 DEBUG [RS:0;22a1f13b90b0:37749 {}] zookeeper.ZKUtil(111): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/22a1f13b90b0,37749,1731361706054 2024-11-11T21:48:26,288 WARN [RS:0;22a1f13b90b0:37749 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T21:48:26,289 INFO [RS:0;22a1f13b90b0:37749 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:48:26,289 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [22a1f13b90b0,37749,1731361706054] 2024-11-11T21:48:26,289 DEBUG [RS:0;22a1f13b90b0:37749 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/WALs/22a1f13b90b0,37749,1731361706054 2024-11-11T21:48:26,292 INFO [RS:0;22a1f13b90b0:37749 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T21:48:26,294 INFO [RS:0;22a1f13b90b0:37749 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T21:48:26,294 INFO [RS:0;22a1f13b90b0:37749 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T21:48:26,294 INFO [RS:0;22a1f13b90b0:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-11T21:48:26,294 INFO [RS:0;22a1f13b90b0:37749 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T21:48:26,295 INFO [RS:0;22a1f13b90b0:37749 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T21:48:26,295 INFO [RS:0;22a1f13b90b0:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T21:48:26,295 DEBUG [RS:0;22a1f13b90b0:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:48:26,295 DEBUG [RS:0;22a1f13b90b0:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:48:26,295 DEBUG [RS:0;22a1f13b90b0:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:48:26,295 DEBUG [RS:0;22a1f13b90b0:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:48:26,295 DEBUG [RS:0;22a1f13b90b0:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:48:26,295 DEBUG [RS:0;22a1f13b90b0:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/22a1f13b90b0:0, corePoolSize=2, maxPoolSize=2 2024-11-11T21:48:26,295 DEBUG [RS:0;22a1f13b90b0:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:48:26,295 DEBUG [RS:0;22a1f13b90b0:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:48:26,295 DEBUG [RS:0;22a1f13b90b0:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:48:26,295 DEBUG [RS:0;22a1f13b90b0:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:48:26,295 DEBUG [RS:0;22a1f13b90b0:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:48:26,295 DEBUG [RS:0;22a1f13b90b0:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/22a1f13b90b0:0, corePoolSize=1, maxPoolSize=1 2024-11-11T21:48:26,296 DEBUG [RS:0;22a1f13b90b0:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/22a1f13b90b0:0, corePoolSize=3, maxPoolSize=3 2024-11-11T21:48:26,296 DEBUG [RS:0;22a1f13b90b0:37749 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/22a1f13b90b0:0, corePoolSize=3, maxPoolSize=3 2024-11-11T21:48:26,296 INFO [RS:0;22a1f13b90b0:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-11T21:48:26,296 INFO [RS:0;22a1f13b90b0:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T21:48:26,296 INFO [RS:0;22a1f13b90b0:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:48:26,296 INFO [RS:0;22a1f13b90b0:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T21:48:26,296 INFO [RS:0;22a1f13b90b0:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T21:48:26,296 INFO [RS:0;22a1f13b90b0:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,37749,1731361706054-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T21:48:26,311 INFO [RS:0;22a1f13b90b0:37749 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T21:48:26,311 INFO [RS:0;22a1f13b90b0:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,37749,1731361706054-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:48:26,311 INFO [RS:0;22a1f13b90b0:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:48:26,311 INFO [RS:0;22a1f13b90b0:37749 {}] regionserver.Replication(171): 22a1f13b90b0,37749,1731361706054 started 2024-11-11T21:48:26,323 INFO [RS:0;22a1f13b90b0:37749 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:48:26,323 INFO [RS:0;22a1f13b90b0:37749 {}] regionserver.HRegionServer(1482): Serving as 22a1f13b90b0,37749,1731361706054, RpcServer on 22a1f13b90b0/172.17.0.2:37749, sessionid=0x10030c26c800001 2024-11-11T21:48:26,323 DEBUG [RS:0;22a1f13b90b0:37749 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T21:48:26,323 DEBUG [RS:0;22a1f13b90b0:37749 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 22a1f13b90b0,37749,1731361706054 2024-11-11T21:48:26,323 DEBUG [RS:0;22a1f13b90b0:37749 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '22a1f13b90b0,37749,1731361706054' 2024-11-11T21:48:26,323 DEBUG [RS:0;22a1f13b90b0:37749 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T21:48:26,324 DEBUG [RS:0;22a1f13b90b0:37749 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T21:48:26,324 DEBUG [RS:0;22a1f13b90b0:37749 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T21:48:26,324 DEBUG [RS:0;22a1f13b90b0:37749 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T21:48:26,325 DEBUG [RS:0;22a1f13b90b0:37749 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 22a1f13b90b0,37749,1731361706054 2024-11-11T21:48:26,325 DEBUG [RS:0;22a1f13b90b0:37749 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '22a1f13b90b0,37749,1731361706054' 2024-11-11T21:48:26,325 DEBUG [RS:0;22a1f13b90b0:37749 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T21:48:26,325 DEBUG 
[RS:0;22a1f13b90b0:37749 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T21:48:26,325 DEBUG [RS:0;22a1f13b90b0:37749 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T21:48:26,325 INFO [RS:0;22a1f13b90b0:37749 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T21:48:26,325 INFO [RS:0;22a1f13b90b0:37749 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T21:48:26,364 WARN [22a1f13b90b0:45553 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-11T21:48:26,429 INFO [RS:0;22a1f13b90b0:37749 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C37749%2C1731361706054, suffix=, logDir=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/WALs/22a1f13b90b0,37749,1731361706054, archiveDir=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/oldWALs, maxLogs=32 2024-11-11T21:48:26,430 INFO [RS:0;22a1f13b90b0:37749 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C37749%2C1731361706054.1731361706430 2024-11-11T21:48:26,440 INFO [RS:0;22a1f13b90b0:37749 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/WALs/22a1f13b90b0,37749,1731361706054/22a1f13b90b0%2C37749%2C1731361706054.1731361706430 2024-11-11T21:48:26,442 DEBUG [RS:0;22a1f13b90b0:37749 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34925:34925),(127.0.0.1/127.0.0.1:42325:42325)] 2024-11-11T21:48:26,614 DEBUG [22a1f13b90b0:45553 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T21:48:26,615 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=22a1f13b90b0,37749,1731361706054 2024-11-11T21:48:26,618 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 22a1f13b90b0,37749,1731361706054, state=OPENING 2024-11-11T21:48:26,621 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T21:48:26,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:48:26,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:48:26,625 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T21:48:26,625 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:48:26,625 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:48:26,625 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=22a1f13b90b0,37749,1731361706054}] 2024-11-11T21:48:26,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:26,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:26,780 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T21:48:26,786 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41665, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T21:48:26,793 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T21:48:26,793 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:48:26,795 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=22a1f13b90b0%2C37749%2C1731361706054.meta, suffix=.meta, logDir=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/WALs/22a1f13b90b0,37749,1731361706054, archiveDir=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/oldWALs, maxLogs=32 2024-11-11T21:48:26,795 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 22a1f13b90b0%2C37749%2C1731361706054.meta.1731361706795.meta 2024-11-11T21:48:26,801 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/WALs/22a1f13b90b0,37749,1731361706054/22a1f13b90b0%2C37749%2C1731361706054.meta.1731361706795.meta 2024-11-11T21:48:26,805 DEBUG 
[RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34925:34925),(127.0.0.1/127.0.0.1:42325:42325)] 2024-11-11T21:48:26,809 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T21:48:26,809 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T21:48:26,809 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T21:48:26,809 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-11T21:48:26,809 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T21:48:26,810 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T21:48:26,810 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T21:48:26,810 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T21:48:26,811 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T21:48:26,812 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T21:48:26,812 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:48:26,812 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:48:26,813 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T21:48:26,813 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T21:48:26,813 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:48:26,814 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:48:26,814 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T21:48:26,814 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T21:48:26,814 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:48:26,815 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:48:26,815 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T21:48:26,816 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T21:48:26,816 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T21:48:26,816 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T21:48:26,816 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T21:48:26,817 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/data/hbase/meta/1588230740 2024-11-11T21:48:26,818 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/data/hbase/meta/1588230740 2024-11-11T21:48:26,819 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T21:48:26,819 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T21:48:26,819 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
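The StoreOpener entries above show the hbase:meta region (1588230740) opening its four column families, info, ns, rep_barrier and table, each with its own cache and compaction configuration. The same family layout can be read back through the public client Admin API; a minimal sketch, assuming a reachable cluster configured via hbase-site.xml on the classpath (the class name and printed fields are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class MetaFamilies {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
      for (ColumnFamilyDescriptor cf : meta.getColumnFamilies()) {
        // expect info, ns, rep_barrier and table, the families opened by StoreOpener above
        System.out.println(cf.getNameAsString()
            + " blocksize=" + cf.getBlocksize()
            + " inMemory=" + cf.isInMemory());
      }
    }
  }
}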
2024-11-11T21:48:26,820 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T21:48:26,821 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=759577, jitterRate=-0.03414863348007202}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T21:48:26,821 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T21:48:26,821 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731361706810Writing region info on filesystem at 1731361706810Initializing all the Stores at 1731361706811 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361706811Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361706811Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731361706811Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731361706811Cleaning up temporary data from old regions at 1731361706819 (+8 ms)Running coprocessor post-open hooks at 1731361706821 (+2 ms)Region opened successfully at 1731361706821 2024-11-11T21:48:26,822 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731361706779 2024-11-11T21:48:26,824 DEBUG [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T21:48:26,824 INFO [RS_OPEN_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T21:48:26,824 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=22a1f13b90b0,37749,1731361706054 2024-11-11T21:48:26,825 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 22a1f13b90b0,37749,1731361706054, state=OPEN 2024-11-11T21:48:26,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T21:48:26,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T21:48:26,827 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=22a1f13b90b0,37749,1731361706054 2024-11-11T21:48:26,827 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:48:26,827 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T21:48:26,829 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T21:48:26,829 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=22a1f13b90b0,37749,1731361706054 in 202 msec 2024-11-11T21:48:26,831 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T21:48:26,831 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 617 msec 2024-11-11T21:48:26,832 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T21:48:26,832 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T21:48:26,833 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T21:48:26,833 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=22a1f13b90b0,37749,1731361706054, seqNum=-1] 2024-11-11T21:48:26,833 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T21:48:26,834 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40949, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T21:48:26,838 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 663 msec 2024-11-11T21:48:26,838 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731361706838, completionTime=-1 2024-11-11T21:48:26,838 INFO 
[master/22a1f13b90b0:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T21:48:26,838 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-11T21:48:26,840 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-11T21:48:26,840 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731361766840 2024-11-11T21:48:26,840 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731361826840 2024-11-11T21:48:26,840 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-11T21:48:26,840 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45553,1731361706016-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T21:48:26,840 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45553,1731361706016-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:48:26,840 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45553,1731361706016-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:48:26,841 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-22a1f13b90b0:45553, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:48:26,841 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T21:48:26,841 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T21:48:26,842 DEBUG [master/22a1f13b90b0:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T21:48:26,844 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.765sec 2024-11-11T21:48:26,844 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T21:48:26,844 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T21:48:26,844 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T21:48:26,844 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
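Once the master logs "Master has completed initialization" and the single region server has reported in, the cluster state is visible to clients through ClusterMetrics. A minimal sketch, again assuming client configuration on the classpath; it only reads the values the log above mentions (active master, live server count, regions in transition):

import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterReady {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      // for the run logged above: one active master, one live region server, no regions in transition
      System.out.println("master=" + metrics.getMasterName());
      System.out.println("liveServers=" + metrics.getLiveServerMetrics().size());
      System.out.println("regionsInTransition=" + metrics.getRegionStatesInTransition().size());
    }
  }
}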
2024-11-11T21:48:26,844 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T21:48:26,844 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45553,1731361706016-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T21:48:26,844 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45553,1731361706016-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T21:48:26,846 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T21:48:26,846 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T21:48:26,846 INFO [master/22a1f13b90b0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=22a1f13b90b0,45553,1731361706016-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T21:48:26,866 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d2ffe35, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:48:26,866 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 22a1f13b90b0,45553,-1 for getting cluster id 2024-11-11T21:48:26,866 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T21:48:26,867 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6955dac4-baf4-4707-aea7-fda8e55e2671' 2024-11-11T21:48:26,868 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T21:48:26,868 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6955dac4-baf4-4707-aea7-fda8e55e2671" 2024-11-11T21:48:26,868 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45c11cf1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:48:26,868 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [22a1f13b90b0,45553,-1] 2024-11-11T21:48:26,868 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T21:48:26,868 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:48:26,869 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43490, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T21:48:26,870 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@698d258d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T21:48:26,870 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T21:48:26,871 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=22a1f13b90b0,37749,1731361706054, seqNum=-1] 2024-11-11T21:48:26,871 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T21:48:26,872 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57824, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T21:48:26,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=22a1f13b90b0,45553,1731361706016 2024-11-11T21:48:26,874 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T21:48:26,876 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-11T21:48:26,876 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T21:48:26,878 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/WALs/test.com,8080,1, archiveDir=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/oldWALs, maxLogs=32 2024-11-11T21:48:26,879 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731361706879 2024-11-11T21:48:26,884 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/WALs/test.com,8080,1/test.com%2C8080%2C1.1731361706879 2024-11-11T21:48:26,885 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34925:34925),(127.0.0.1/127.0.0.1:42325:42325)] 2024-11-11T21:48:26,886 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731361706885 2024-11-11T21:48:26,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:26,891 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:26,891 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:26,891 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:26,892 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:26,892 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:26,892 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/WALs/test.com,8080,1/test.com%2C8080%2C1.1731361706879 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/WALs/test.com,8080,1/test.com%2C8080%2C1.1731361706885 2024-11-11T21:48:26,893 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42325:42325),(127.0.0.1/127.0.0.1:34925:34925)] 2024-11-11T21:48:26,893 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/WALs/test.com,8080,1/test.com%2C8080%2C1.1731361706879 is not closed yet, will try archiving it next time 2024-11-11T21:48:26,894 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:26,894 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:26,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42665 is added to blk_1073741835_1011 (size=93) 2024-11-11T21:48:26,895 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:26,895 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:26,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:40523 is added to blk_1073741835_1011 (size=93) 2024-11-11T21:48:26,895 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:26,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40523 is added to blk_1073741836_1012 (size=93) 2024-11-11T21:48:26,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42665 is added to blk_1073741836_1012 (size=93) 2024-11-11T21:48:26,901 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/WALs/test.com,8080,1/test.com%2C8080%2C1.1731361706879 to hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/oldWALs/test.com%2C8080%2C1.1731361706879 2024-11-11T21:48:26,903 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/oldWALs 2024-11-11T21:48:26,903 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731361706885) 2024-11-11T21:48:26,903 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T21:48:26,903 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T21:48:26,903 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:48:26,903 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:48:26,904 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:48:26,904 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-11T21:48:26,904 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T21:48:26,904 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=140123057, stopped=false 2024-11-11T21:48:26,904 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=22a1f13b90b0,45553,1731361706016 2024-11-11T21:48:26,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T21:48:26,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:48:26,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T21:48:26,905 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T21:48:26,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:48:26,905 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
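The recurring "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above come from RecoverLeaseFSUtils, which probes whether an old WAL file is already closed by invoking isFileClosed through reflection (presumably to stay compatible with Hadoop versions where that method may be missing). Here the DistributedFileSystem belonging to an earlier mini cluster has already been shut down, so DFSClient.checkOpen throws and the probe fails harmlessly. A sketch of the non-reflective equivalent, with a hypothetical namenode URI and WAL path used purely for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalClosedProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // hypothetical namenode address and WAL path, for illustration only
    FileSystem fs = FileSystem.get(new java.net.URI("hdfs://localhost:8020"), conf);
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // isFileClosed() is the call RecoverLeaseFSUtils reaches via reflection; it throws
      // IOException("Filesystem closed") when the underlying DFSClient was already closed.
      boolean closed = dfs.isFileClosed(new Path("/hbase/WALs/example-wal"));
      System.out.println("closed=" + closed);
    }
  }
}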
2024-11-11T21:48:26,905 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:48:26,905 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:48:26,905 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '22a1f13b90b0,37749,1731361706054' ***** 2024-11-11T21:48:26,905 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T21:48:26,906 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:48:26,906 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T21:48:26,906 INFO [RS:0;22a1f13b90b0:37749 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T21:48:26,906 INFO [RS:0;22a1f13b90b0:37749 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T21:48:26,906 INFO [RS:0;22a1f13b90b0:37749 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T21:48:26,906 INFO [RS:0;22a1f13b90b0:37749 {}] regionserver.HRegionServer(959): stopping server 22a1f13b90b0,37749,1731361706054 2024-11-11T21:48:26,906 INFO [RS:0;22a1f13b90b0:37749 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T21:48:26,906 INFO [RS:0;22a1f13b90b0:37749 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;22a1f13b90b0:37749. 2024-11-11T21:48:26,906 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T21:48:26,906 DEBUG [RS:0;22a1f13b90b0:37749 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T21:48:26,906 DEBUG [RS:0;22a1f13b90b0:37749 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:48:26,906 INFO [RS:0;22a1f13b90b0:37749 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T21:48:26,906 INFO [RS:0;22a1f13b90b0:37749 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T21:48:26,906 INFO [RS:0;22a1f13b90b0:37749 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
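The call stacks above (AbstractTestLogRolling.tearDown invoking HBaseTestingUtil.shutdownMiniCluster, which drives HMaster.shutdown) are what produce the STOPPING messages that follow. A minimal sketch of that setup/teardown pattern in a JUnit 4 test; only HBaseTestingUtil and shutdownMiniCluster are taken from the stack traces, while the class name and the startMiniCluster call are the usual counterparts and should be treated as assumptions:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class ExampleMiniClusterTest {
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // brings up a local master and region server, as in the "Minicluster is up" entry above
    testUtil.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // triggers the shutdown sequence seen in the log: cluster shutdown requested,
    // region server STOPPING, WALs closed and archived, master STOPPING
    testUtil.shutdownMiniCluster();
  }
}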
2024-11-11T21:48:26,906 INFO [RS:0;22a1f13b90b0:37749 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T21:48:26,907 INFO [RS:0;22a1f13b90b0:37749 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-11T21:48:26,907 DEBUG [RS:0;22a1f13b90b0:37749 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-11T21:48:26,907 DEBUG [RS:0;22a1f13b90b0:37749 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T21:48:26,907 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T21:48:26,907 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T21:48:26,907 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T21:48:26,907 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T21:48:26,907 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T21:48:26,907 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-11T21:48:26,923 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/data/hbase/meta/1588230740/.tmp/ns/0efab27978c34e84bf32e252901baa7d is 43, key is default/ns:d/1731361706834/Put/seqid=0 2024-11-11T21:48:26,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42665 is added to blk_1073741837_1013 (size=5153) 2024-11-11T21:48:26,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40523 is added to blk_1073741837_1013 (size=5153) 2024-11-11T21:48:27,107 DEBUG [RS:0;22a1f13b90b0:37749 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T21:48:27,296 INFO [regionserver/22a1f13b90b0:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T21:48:27,297 INFO [regionserver/22a1f13b90b0:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T21:48:27,307 DEBUG [RS:0;22a1f13b90b0:37749 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T21:48:27,329 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/data/hbase/meta/1588230740/.tmp/ns/0efab27978c34e84bf32e252901baa7d 2024-11-11T21:48:27,339 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/data/hbase/meta/1588230740/.tmp/ns/0efab27978c34e84bf32e252901baa7d as 
hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/data/hbase/meta/1588230740/ns/0efab27978c34e84bf32e252901baa7d 2024-11-11T21:48:27,344 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/data/hbase/meta/1588230740/ns/0efab27978c34e84bf32e252901baa7d, entries=2, sequenceid=6, filesize=5.0 K 2024-11-11T21:48:27,345 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 438ms, sequenceid=6, compaction requested=false 2024-11-11T21:48:27,349 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-11T21:48:27,350 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T21:48:27,350 INFO [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T21:48:27,350 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731361706907Running coprocessor pre-close hooks at 1731361706907Disabling compacts and flushes for region at 1731361706907Disabling writes for close at 1731361706907Obtaining lock to block concurrent updates at 1731361706907Preparing flush snapshotting stores in 1588230740 at 1731361706907Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731361706907Flushing stores of hbase:meta,,1.1588230740 at 1731361706908 (+1 ms)Flushing 1588230740/ns: creating writer at 1731361706908Flushing 1588230740/ns: appending metadata at 1731361706922 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731361706922Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@51147a8: reopening flushed file at 1731361707338 (+416 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 438ms, sequenceid=6, compaction requested=false at 1731361707345 (+7 ms)Writing region close event to WAL at 1731361707346 (+1 ms)Running coprocessor post-close hooks at 1731361707350 (+4 ms)Closed at 1731361707350 2024-11-11T21:48:27,350 DEBUG [RS_CLOSE_META-regionserver/22a1f13b90b0:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T21:48:27,507 INFO [RS:0;22a1f13b90b0:37749 {}] regionserver.HRegionServer(976): stopping server 22a1f13b90b0,37749,1731361706054; all regions closed. 
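The close of 1588230740 above flushes its 74-byte memstore into an ns-family HFile (written under .tmp, then committed, sequenceid=6) before the recovered.edits seqid marker is written. The same flush machinery can also be driven from a client; a hedged sketch using the public Admin API (this is the client-side analogue, not what the test itself does):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushMeta {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // asks the hosting region server to flush hbase:meta memstores to HFiles,
      // the same DefaultStoreFlusher path logged above during region close
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}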
2024-11-11T21:48:27,518 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:27,518 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:27,518 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:27,518 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:27,518 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:27,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42665 is added to blk_1073741834_1010 (size=1152) 2024-11-11T21:48:27,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40523 is added to blk_1073741834_1010 (size=1152) 2024-11-11T21:48:27,523 DEBUG [RS:0;22a1f13b90b0:37749 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/oldWALs 2024-11-11T21:48:27,523 INFO [RS:0;22a1f13b90b0:37749 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 22a1f13b90b0%2C37749%2C1731361706054.meta:.meta(num 1731361706795) 2024-11-11T21:48:27,523 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:27,523 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:27,523 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:27,523 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:27,523 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:27,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42665 is added to blk_1073741833_1009 (size=93) 2024-11-11T21:48:27,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40523 is added to blk_1073741833_1009 (size=93) 2024-11-11T21:48:27,527 DEBUG [RS:0;22a1f13b90b0:37749 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/oldWALs 2024-11-11T21:48:27,527 INFO [RS:0;22a1f13b90b0:37749 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 22a1f13b90b0%2C37749%2C1731361706054:(num 1731361706430) 2024-11-11T21:48:27,527 DEBUG [RS:0;22a1f13b90b0:37749 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T21:48:27,527 INFO [RS:0;22a1f13b90b0:37749 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T21:48:27,527 INFO [RS:0;22a1f13b90b0:37749 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T21:48:27,527 INFO [RS:0;22a1f13b90b0:37749 {}] hbase.ChoreService(370): Chore service for: regionserver/22a1f13b90b0:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-11T21:48:27,528 INFO [RS:0;22a1f13b90b0:37749 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T21:48:27,528 INFO [regionserver/22a1f13b90b0:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T21:48:27,528 INFO [RS:0;22a1f13b90b0:37749 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37749 2024-11-11T21:48:27,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T21:48:27,529 INFO [RS:0;22a1f13b90b0:37749 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T21:48:27,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/22a1f13b90b0,37749,1731361706054 2024-11-11T21:48:27,530 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [22a1f13b90b0,37749,1731361706054] 2024-11-11T21:48:27,530 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/22a1f13b90b0,37749,1731361706054 already deleted, retry=false 2024-11-11T21:48:27,531 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 22a1f13b90b0,37749,1731361706054 expired; onlineServers=0 2024-11-11T21:48:27,531 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '22a1f13b90b0,45553,1731361706016' ***** 2024-11-11T21:48:27,531 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T21:48:27,531 INFO [M:0;22a1f13b90b0:45553 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T21:48:27,531 INFO [M:0;22a1f13b90b0:45553 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T21:48:27,531 DEBUG [M:0;22a1f13b90b0:45553 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T21:48:27,531 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T21:48:27,531 DEBUG [M:0;22a1f13b90b0:45553 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T21:48:27,531 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361706183 {}] cleaner.HFileCleaner(306): Exit Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.large.0-1731361706183,5,FailOnTimeoutGroup] 2024-11-11T21:48:27,531 DEBUG [master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361706183 {}] cleaner.HFileCleaner(306): Exit Thread[master/22a1f13b90b0:0:becomeActiveMaster-HFileCleaner.small.0-1731361706183,5,FailOnTimeoutGroup] 2024-11-11T21:48:27,531 INFO [M:0;22a1f13b90b0:45553 {}] hbase.ChoreService(370): Chore service for: master/22a1f13b90b0:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T21:48:27,531 INFO [M:0;22a1f13b90b0:45553 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T21:48:27,531 DEBUG [M:0;22a1f13b90b0:45553 {}] master.HMaster(1795): Stopping service threads 2024-11-11T21:48:27,531 INFO [M:0;22a1f13b90b0:45553 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T21:48:27,531 INFO [M:0;22a1f13b90b0:45553 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T21:48:27,531 INFO [M:0;22a1f13b90b0:45553 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T21:48:27,531 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T21:48:27,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T21:48:27,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T21:48:27,532 DEBUG [M:0;22a1f13b90b0:45553 {}] zookeeper.ZKUtil(347): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T21:48:27,532 WARN [M:0;22a1f13b90b0:45553 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T21:48:27,533 INFO [M:0;22a1f13b90b0:45553 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/.lastflushedseqids 2024-11-11T21:48:27,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42665 is added to blk_1073741838_1014 (size=99) 2024-11-11T21:48:27,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40523 is added to blk_1073741838_1014 (size=99) 2024-11-11T21:48:27,541 INFO [M:0;22a1f13b90b0:45553 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T21:48:27,542 INFO [M:0;22a1f13b90b0:45553 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T21:48:27,542 DEBUG [M:0;22a1f13b90b0:45553 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T21:48:27,542 INFO [M:0;22a1f13b90b0:45553 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:48:27,542 DEBUG [M:0;22a1f13b90b0:45553 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:48:27,542 DEBUG [M:0;22a1f13b90b0:45553 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T21:48:27,542 DEBUG [M:0;22a1f13b90b0:45553 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T21:48:27,542 INFO [M:0;22a1f13b90b0:45553 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-11T21:48:27,558 DEBUG [M:0;22a1f13b90b0:45553 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b0f98f1e027b4272a3ab584df65081c0 is 82, key is hbase:meta,,1/info:regioninfo/1731361706824/Put/seqid=0 2024-11-11T21:48:27,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40523 is added to blk_1073741839_1015 (size=5672) 2024-11-11T21:48:27,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42665 is added to blk_1073741839_1015 (size=5672) 2024-11-11T21:48:27,583 INFO [M:0;22a1f13b90b0:45553 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b0f98f1e027b4272a3ab584df65081c0 2024-11-11T21:48:27,605 DEBUG [M:0;22a1f13b90b0:45553 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a4232a36eab648f3ba933ecf0258468d is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731361706837/Put/seqid=0 2024-11-11T21:48:27,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42665 is added to blk_1073741840_1016 (size=5275) 2024-11-11T21:48:27,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40523 is added to blk_1073741840_1016 (size=5275) 2024-11-11T21:48:27,610 INFO [M:0;22a1f13b90b0:45553 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a4232a36eab648f3ba933ecf0258468d 2024-11-11T21:48:27,628 DEBUG [M:0;22a1f13b90b0:45553 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d96d0181029d4d1895a185d5288d195f is 69, key is 22a1f13b90b0,37749,1731361706054/rs:state/1731361706286/Put/seqid=0 2024-11-11T21:48:27,630 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:48:27,630 INFO [RS:0;22a1f13b90b0:37749 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T21:48:27,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37749-0x10030c26c800001, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:48:27,630 INFO [RS:0;22a1f13b90b0:37749 {}] regionserver.HRegionServer(1031): Exiting; stopping=22a1f13b90b0,37749,1731361706054; zookeeper connection closed. 2024-11-11T21:48:27,630 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4abafbe4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4abafbe4 2024-11-11T21:48:27,631 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T21:48:27,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40523 is added to blk_1073741841_1017 (size=5156) 2024-11-11T21:48:27,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42665 is added to blk_1073741841_1017 (size=5156) 2024-11-11T21:48:27,634 INFO [M:0;22a1f13b90b0:45553 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d96d0181029d4d1895a185d5288d195f 2024-11-11T21:48:27,659 DEBUG [M:0;22a1f13b90b0:45553 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aa98832321e244a9b779192df47794a0 is 52, key is load_balancer_on/state:d/1731361706875/Put/seqid=0 2024-11-11T21:48:27,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42665 is added to blk_1073741842_1018 (size=5056) 2024-11-11T21:48:27,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40523 is added to blk_1073741842_1018 (size=5056) 2024-11-11T21:48:27,664 INFO [M:0;22a1f13b90b0:45553 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aa98832321e244a9b779192df47794a0 2024-11-11T21:48:27,670 DEBUG [M:0;22a1f13b90b0:45553 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b0f98f1e027b4272a3ab584df65081c0 as hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b0f98f1e027b4272a3ab584df65081c0 2024-11-11T21:48:27,675 INFO [M:0;22a1f13b90b0:45553 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b0f98f1e027b4272a3ab584df65081c0, entries=8, sequenceid=29, filesize=5.5 K 2024-11-11T21:48:27,676 DEBUG [M:0;22a1f13b90b0:45553 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a4232a36eab648f3ba933ecf0258468d as hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a4232a36eab648f3ba933ecf0258468d 2024-11-11T21:48:27,680 INFO [M:0;22a1f13b90b0:45553 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a4232a36eab648f3ba933ecf0258468d, entries=3, sequenceid=29, filesize=5.2 K 2024-11-11T21:48:27,681 DEBUG [M:0;22a1f13b90b0:45553 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d96d0181029d4d1895a185d5288d195f as hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d96d0181029d4d1895a185d5288d195f 2024-11-11T21:48:27,685 INFO [M:0;22a1f13b90b0:45553 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d96d0181029d4d1895a185d5288d195f, entries=1, sequenceid=29, filesize=5.0 K 2024-11-11T21:48:27,685 DEBUG [M:0;22a1f13b90b0:45553 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aa98832321e244a9b779192df47794a0 as hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/aa98832321e244a9b779192df47794a0 2024-11-11T21:48:27,689 INFO [M:0;22a1f13b90b0:45553 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44645/user/jenkins/test-data/02cb1f23-fdf3-6337-b969-a4b91f058a6e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/aa98832321e244a9b779192df47794a0, entries=1, sequenceid=29, filesize=4.9 K 2024-11-11T21:48:27,690 INFO [M:0;22a1f13b90b0:45553 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 148ms, sequenceid=29, compaction requested=false 2024-11-11T21:48:27,694 INFO [M:0;22a1f13b90b0:45553 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T21:48:27,694 DEBUG [M:0;22a1f13b90b0:45553 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731361707542Disabling compacts and flushes for region at 1731361707542Disabling writes for close at 1731361707542Obtaining lock to block concurrent updates at 1731361707542Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731361707542Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731361707542Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731361707543 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731361707543Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731361707558 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731361707558Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731361707587 (+29 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731361707604 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731361707604Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731361707614 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731361707628 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731361707628Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731361707640 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731361707658 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731361707658Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@483cd636: reopening flushed file at 1731361707669 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a839305: reopening flushed file at 1731361707675 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6253ebdc: reopening flushed file at 1731361707680 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4c8e9287: reopening flushed file at 1731361707685 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 148ms, sequenceid=29, compaction requested=false at 1731361707690 (+5 ms)Writing region close event to WAL at 1731361707694 (+4 ms)Closed at 1731361707694 2024-11-11T21:48:27,694 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:27,694 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:27,694 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:27,695 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:27,695 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T21:48:27,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42665 is added to blk_1073741830_1006 (size=10311) 2024-11-11T21:48:27,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40523 is added to blk_1073741830_1006 (size=10311) 2024-11-11T21:48:27,697 INFO [M:0;22a1f13b90b0:45553 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-11T21:48:27,697 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T21:48:27,697 INFO [M:0;22a1f13b90b0:45553 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45553 2024-11-11T21:48:27,697 INFO [M:0;22a1f13b90b0:45553 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T21:48:27,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,33181,1731361530149/22a1f13b90b0%2C33181%2C1731361530149.meta.1731361531348.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:27,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/WALs/22a1f13b90b0,37551,1731361531521/22a1f13b90b0%2C37551%2C1731361531521.1731361531725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-11T21:48:27,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:48:27,799 INFO [M:0;22a1f13b90b0:45553 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T21:48:27,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45553-0x10030c26c800000, quorum=127.0.0.1:50978, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T21:48:27,802 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35c58925{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:48:27,802 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@265f6a26{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:48:27,802 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:48:27,802 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d88d0b5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:48:27,803 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1926aa54{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/hadoop.log.dir/,STOPPED} 2024-11-11T21:48:27,805 WARN [BP-1938071021-172.17.0.2-1731361705396 heartbeating to 
localhost/127.0.0.1:44645 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T21:48:27,805 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T21:48:27,805 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T21:48:27,805 WARN [BP-1938071021-172.17.0.2-1731361705396 heartbeating to localhost/127.0.0.1:44645 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1938071021-172.17.0.2-1731361705396 (Datanode Uuid fed9ebd1-7986-4d5d-a1f6-3d5ac616a0a2) service to localhost/127.0.0.1:44645 2024-11-11T21:48:27,806 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/cluster_6a94c650-af49-558c-0e72-4f7936a29eba/data/data3/current/BP-1938071021-172.17.0.2-1731361705396 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:48:27,806 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/cluster_6a94c650-af49-558c-0e72-4f7936a29eba/data/data4/current/BP-1938071021-172.17.0.2-1731361705396 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:48:27,806 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T21:48:27,813 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@47f122ec{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T21:48:27,814 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@309e84ad{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:48:27,814 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:48:27,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@470d812e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:48:27,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10c1adfc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/hadoop.log.dir/,STOPPED} 2024-11-11T21:48:27,816 WARN [BP-1938071021-172.17.0.2-1731361705396 heartbeating to localhost/127.0.0.1:44645 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T21:48:27,816 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T21:48:27,816 WARN [BP-1938071021-172.17.0.2-1731361705396 heartbeating to localhost/127.0.0.1:44645 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1938071021-172.17.0.2-1731361705396 (Datanode Uuid 0d28cb86-5447-4b88-bb92-0260488402df) service to localhost/127.0.0.1:44645 2024-11-11T21:48:27,816 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T21:48:27,816 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/cluster_6a94c650-af49-558c-0e72-4f7936a29eba/data/data1/current/BP-1938071021-172.17.0.2-1731361705396 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:48:27,816 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/cluster_6a94c650-af49-558c-0e72-4f7936a29eba/data/data2/current/BP-1938071021-172.17.0.2-1731361705396 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T21:48:27,817 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T21:48:27,823 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@651fb028{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T21:48:27,824 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@62daa233{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T21:48:27,824 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T21:48:27,824 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5179a3de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T21:48:27,824 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@412c5a2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ee85b543-35e1-19ea-fcb5-8b065ee695b2/hadoop.log.dir/,STOPPED} 2024-11-11T21:48:27,830 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T21:48:27,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T21:48:27,856 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=270 (was 231) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44645 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44645 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44645 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:44645 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:44645 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:44645 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:44645 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44645 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=540 (was 517) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=101 (was 101), ProcessCount=11 (was 11), AvailableMemoryMB=4162 (was 4198) 2024-11-11T21:48:27,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39361/user/jenkins/test-data/3fb73592-5125-ccd9-7369-0e4e8f5001e4/MasterData/WALs/22a1f13b90b0,42845,1731361530096/22a1f13b90b0%2C42845%2C1731361530096.1731361530659 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more