2024-11-16 08:34:42,457 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-16 08:34:42,469 main DEBUG Took 0.009877 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-16 08:34:42,469 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-16 08:34:42,470 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-16 08:34:42,471 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-16 08:34:42,472 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 08:34:42,479 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-16 08:34:42,491 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 08:34:42,492 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 08:34:42,493 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 08:34:42,493 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 08:34:42,494 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 08:34:42,494 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 08:34:42,495 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 08:34:42,496 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 08:34:42,496 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 08:34:42,496 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 08:34:42,498 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 08:34:42,498 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 08:34:42,499 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 08:34:42,500 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-16 08:34:42,500 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 08:34:42,500 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 08:34:42,501 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 08:34:42,501 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 08:34:42,502 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 08:34:42,502 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 08:34:42,503 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 08:34:42,503 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 08:34:42,503 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 08:34:42,504 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-16 08:34:42,504 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 08:34:42,504 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-16 08:34:42,506 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-16 08:34:42,507 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-16 08:34:42,509 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-16 08:34:42,510 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
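The LoggerConfig builder entries above reflect the per-package level overrides loaded from the test log4j2.properties (for example org.apache.zookeeper at ERROR, org.apache.hadoop at WARN, org.apache.hadoop.hbase at DEBUG) plus a root logger at INFO routed to the Console appender. A minimal, illustrative way to confirm the resulting effective levels from Java using the public Log4j 2 API; the class name below is hypothetical and this check is not part of the test itself:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    public class CheckLogLevels {
        public static void main(String[] args) {
            // Logger names and expected levels mirror the LoggerConfig entries printed above.
            Logger zk = LogManager.getLogger("org.apache.zookeeper");       // configured at ERROR
            Logger hbase = LogManager.getLogger("org.apache.hadoop.hbase"); // configured at DEBUG
            System.out.println("zookeeper INFO enabled: " + zk.isInfoEnabled());    // expect false
            System.out.println("hbase DEBUG enabled:    " + hbase.isDebugEnabled()); // expect true
        }
    }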
2024-11-16 08:34:42,511 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-16 08:34:42,511 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-16 08:34:42,520 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-16 08:34:42,523 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-16 08:34:42,524 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-16 08:34:42,525 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-16 08:34:42,525 main DEBUG createAppenders(={Console}) 2024-11-16 08:34:42,526 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-16 08:34:42,526 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-16 08:34:42,527 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-16 08:34:42,527 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-16 08:34:42,527 main DEBUG OutputStream closed 2024-11-16 08:34:42,528 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-16 08:34:42,528 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-16 08:34:42,528 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-16 08:34:42,599 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-16 08:34:42,602 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-16 08:34:42,603 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-16 08:34:42,604 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-16 08:34:42,605 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-16 08:34:42,605 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-16 08:34:42,606 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-16 08:34:42,606 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-16 08:34:42,606 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-16 08:34:42,607 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-16 08:34:42,607 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-16 08:34:42,607 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-16 08:34:42,608 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-16 08:34:42,608 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-16 08:34:42,608 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-16 08:34:42,609 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-16 08:34:42,609 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-16 08:34:42,610 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-16 08:34:42,612 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-16 08:34:42,613 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-16 08:34:42,613 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-16 08:34:42,614 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-16T08:34:42,862 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba 2024-11-16 08:34:42,865 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-16 08:34:42,865 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
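At this point Log4j reconfiguration is complete and the entries that follow come from the test itself: HBaseClassTestRule applies the class timeout ("13 mins" below) and HBaseTestingUtil starts a minicluster with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}. A minimal sketch of how a test typically drives that startup, assuming the HBase 3.x test API; the class name is hypothetical and the actual TestLogRolling source may differ:

    import org.apache.hadoop.hbase.HBaseClassTestRule;
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.ClassRule;

    public class TestLogRollingSketch {
        @ClassRule
        public static final HBaseClassTestRule CLASS_RULE =
            HBaseClassTestRule.forClass(TestLogRollingSketch.class); // enforces the class-level timeout logged below

        private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

        @BeforeClass
        public static void setUp() throws Exception {
            // Matches the StartMiniClusterOption logged below:
            // 1 master, 1 region server, 2 data nodes, 1 ZooKeeper server.
            StartMiniClusterOption option = StartMiniClusterOption.builder()
                .numMasters(1)
                .numRegionServers(1)
                .numDataNodes(2)
                .numZkServers(1)
                .build();
            TEST_UTIL.startMiniCluster(option);
        }

        @AfterClass
        public static void tearDown() throws Exception {
            TEST_UTIL.shutdownMiniCluster();
        }
    }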
2024-11-16T08:34:42,874 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-16T08:34:42,906 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=339, ProcessCount=11, AvailableMemoryMB=3042 2024-11-16T08:34:42,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T08:34:42,924 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/cluster_96f5ee98-0442-5dac-b1be-cdff31b13ecc, deleteOnExit=true 2024-11-16T08:34:42,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T08:34:42,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/test.cache.data in system properties and HBase conf 2024-11-16T08:34:42,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T08:34:42,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/hadoop.log.dir in system properties and HBase conf 2024-11-16T08:34:42,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T08:34:42,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T08:34:42,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T08:34:43,005 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-16T08:34:43,093 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T08:34:43,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T08:34:43,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T08:34:43,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T08:34:43,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T08:34:43,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T08:34:43,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T08:34:43,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T08:34:43,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T08:34:43,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T08:34:43,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/nfs.dump.dir in system properties and HBase conf 2024-11-16T08:34:43,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/java.io.tmpdir in system properties and HBase conf 2024-11-16T08:34:43,102 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T08:34:43,102 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T08:34:43,103 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T08:34:43,561 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T08:34:44,223 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-16T08:34:44,302 INFO [Time-limited test {}] log.Log(170): Logging initialized @2537ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-16T08:34:44,397 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:34:44,480 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:34:44,518 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:34:44,518 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:34:44,520 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T08:34:44,540 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:34:44,545 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:34:44,546 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:34:44,779 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/java.io.tmpdir/jetty-localhost-37887-hadoop-hdfs-3_4_1-tests_jar-_-any-8744267202277836170/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T08:34:44,796 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:37887} 2024-11-16T08:34:44,796 INFO [Time-limited test {}] server.Server(415): Started @3032ms 2024-11-16T08:34:44,843 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T08:34:45,538 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:34:45,549 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:34:45,557 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:34:45,557 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:34:45,558 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T08:34:45,561 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60d13ec7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:34:45,562 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:34:45,679 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7ca8488f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/java.io.tmpdir/jetty-localhost-40473-hadoop-hdfs-3_4_1-tests_jar-_-any-17693949427958535718/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:34:45,680 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@dc1ca4f{HTTP/1.1, (http/1.1)}{localhost:40473} 2024-11-16T08:34:45,680 INFO [Time-limited test {}] server.Server(415): Started @3916ms 2024-11-16T08:34:45,746 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:34:45,935 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:34:45,955 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:34:45,979 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:34:45,980 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:34:45,980 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T08:34:45,983 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ec7bf2e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:34:45,984 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:34:46,121 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1ca1952e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/java.io.tmpdir/jetty-localhost-45737-hadoop-hdfs-3_4_1-tests_jar-_-any-1189394442499163818/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:34:46,122 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75639b0e{HTTP/1.1, (http/1.1)}{localhost:45737} 2024-11-16T08:34:46,122 INFO [Time-limited test {}] server.Server(415): Started @4358ms 2024-11-16T08:34:46,125 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
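The three Jetty instances above (the hdfs webapp on port 37887 and the two datanode webapps on 40473 and 45737) belong to the embedded HDFS cluster behind the "STARTING DFS" step: one NameNode plus the two DataNodes requested by numDataNodes=2. For tests that only need that HDFS layer, the utility can start it on its own; an illustrative sketch, again assuming the HBase 3.x test API, with a hypothetical class name:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsOnlySketch {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtil util = new HBaseTestingUtil();
            // Two DataNodes, as in the cluster whose startup is logged above.
            MiniDFSCluster dfs = util.startMiniDFSCluster(2);
            System.out.println("Mini DFS at: " + dfs.getFileSystem().getUri());
            util.shutdownMiniDFSCluster();
        }
    }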
2024-11-16T08:34:48,033 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/cluster_96f5ee98-0442-5dac-b1be-cdff31b13ecc/data/data4/current/BP-712895127-172.17.0.3-1731746083643/current, will proceed with Du for space computation calculation, 2024-11-16T08:34:48,033 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/cluster_96f5ee98-0442-5dac-b1be-cdff31b13ecc/data/data2/current/BP-712895127-172.17.0.3-1731746083643/current, will proceed with Du for space computation calculation, 2024-11-16T08:34:48,033 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/cluster_96f5ee98-0442-5dac-b1be-cdff31b13ecc/data/data3/current/BP-712895127-172.17.0.3-1731746083643/current, will proceed with Du for space computation calculation, 2024-11-16T08:34:48,033 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/cluster_96f5ee98-0442-5dac-b1be-cdff31b13ecc/data/data1/current/BP-712895127-172.17.0.3-1731746083643/current, will proceed with Du for space computation calculation, 2024-11-16T08:34:48,069 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T08:34:48,069 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T08:34:48,116 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc6829356f3445d9e with lease ID 0xef824a325fd3d37a: Processing first storage report for DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5 from datanode DatanodeRegistration(127.0.0.1:44307, datanodeUuid=e6418a5c-e3e7-4118-9ed9-a67e8748a7bc, infoPort=34955, infoSecurePort=0, ipcPort=36287, storageInfo=lv=-57;cid=testClusterID;nsid=1411588721;c=1731746083643) 2024-11-16T08:34:48,117 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6829356f3445d9e with lease ID 0xef824a325fd3d37a: from storage DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5 node DatanodeRegistration(127.0.0.1:44307, datanodeUuid=e6418a5c-e3e7-4118-9ed9-a67e8748a7bc, infoPort=34955, infoSecurePort=0, ipcPort=36287, storageInfo=lv=-57;cid=testClusterID;nsid=1411588721;c=1731746083643), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T08:34:48,117 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7279c4e2ce784911 with lease ID 0xef824a325fd3d37b: Processing first storage report for DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16 from datanode DatanodeRegistration(127.0.0.1:44245, datanodeUuid=66151ea6-5765-4166-afe4-4071cd671cb6, infoPort=44653, infoSecurePort=0, ipcPort=44063, storageInfo=lv=-57;cid=testClusterID;nsid=1411588721;c=1731746083643) 2024-11-16T08:34:48,118 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7279c4e2ce784911 with lease ID 0xef824a325fd3d37b: from storage DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16 node DatanodeRegistration(127.0.0.1:44245, datanodeUuid=66151ea6-5765-4166-afe4-4071cd671cb6, infoPort=44653, infoSecurePort=0, ipcPort=44063, storageInfo=lv=-57;cid=testClusterID;nsid=1411588721;c=1731746083643), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:34:48,118 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc6829356f3445d9e with lease ID 0xef824a325fd3d37a: Processing first storage report for DS-f664e06d-23fa-4612-9558-280610c74452 from datanode DatanodeRegistration(127.0.0.1:44307, datanodeUuid=e6418a5c-e3e7-4118-9ed9-a67e8748a7bc, infoPort=34955, infoSecurePort=0, ipcPort=36287, storageInfo=lv=-57;cid=testClusterID;nsid=1411588721;c=1731746083643) 2024-11-16T08:34:48,118 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6829356f3445d9e with lease ID 0xef824a325fd3d37a: from storage DS-f664e06d-23fa-4612-9558-280610c74452 node DatanodeRegistration(127.0.0.1:44307, datanodeUuid=e6418a5c-e3e7-4118-9ed9-a67e8748a7bc, infoPort=34955, infoSecurePort=0, ipcPort=36287, storageInfo=lv=-57;cid=testClusterID;nsid=1411588721;c=1731746083643), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T08:34:48,118 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7279c4e2ce784911 with lease ID 0xef824a325fd3d37b: Processing first storage report for DS-20cc3f64-e221-4aae-b0c3-57ec3657e0da from datanode DatanodeRegistration(127.0.0.1:44245, datanodeUuid=66151ea6-5765-4166-afe4-4071cd671cb6, infoPort=44653, infoSecurePort=0, ipcPort=44063, storageInfo=lv=-57;cid=testClusterID;nsid=1411588721;c=1731746083643) 2024-11-16T08:34:48,118 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x7279c4e2ce784911 with lease ID 0xef824a325fd3d37b: from storage DS-20cc3f64-e221-4aae-b0c3-57ec3657e0da node DatanodeRegistration(127.0.0.1:44245, datanodeUuid=66151ea6-5765-4166-afe4-4071cd671cb6, infoPort=44653, infoSecurePort=0, ipcPort=44063, storageInfo=lv=-57;cid=testClusterID;nsid=1411588721;c=1731746083643), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:34:48,203 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba 2024-11-16T08:34:48,270 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/cluster_96f5ee98-0442-5dac-b1be-cdff31b13ecc/zookeeper_0, clientPort=62441, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/cluster_96f5ee98-0442-5dac-b1be-cdff31b13ecc/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/cluster_96f5ee98-0442-5dac-b1be-cdff31b13ecc/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T08:34:48,279 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62441 2024-11-16T08:34:48,288 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:34:48,291 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:34:48,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741825_1001 (size=7) 2024-11-16T08:34:48,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741825_1001 (size=7) 2024-11-16T08:34:48,908 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d with version=8 2024-11-16T08:34:48,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/hbase-staging 2024-11-16T08:34:48,990 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-16T08:34:49,226 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c27dd56784bd:0 server-side Connection retries=45 2024-11-16T08:34:49,235 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:34:49,236 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T08:34:49,242 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T08:34:49,242 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:34:49,242 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T08:34:49,369 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T08:34:49,423 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-16T08:34:49,433 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-16T08:34:49,436 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T08:34:49,460 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 100944 (auto-detected) 2024-11-16T08:34:49,461 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-11-16T08:34:49,479 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:46533 2024-11-16T08:34:49,500 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46533 connecting to ZooKeeper ensemble=127.0.0.1:62441 2024-11-16T08:34:49,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:465330x0, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T08:34:49,648 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46533-0x10142c840e70000 connected 2024-11-16T08:34:49,753 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:34:49,757 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:34:49,770 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:34:49,774 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d, hbase.cluster.distributed=false 2024-11-16T08:34:49,802 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T08:34:49,807 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46533 2024-11-16T08:34:49,807 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46533 2024-11-16T08:34:49,808 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46533 2024-11-16T08:34:49,809 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46533 2024-11-16T08:34:49,809 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46533 2024-11-16T08:34:49,900 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c27dd56784bd:0 server-side Connection retries=45 2024-11-16T08:34:49,901 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:34:49,901 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T08:34:49,902 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T08:34:49,902 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:34:49,902 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T08:34:49,904 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T08:34:49,908 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T08:34:49,909 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34865 2024-11-16T08:34:49,912 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34865 connecting to ZooKeeper ensemble=127.0.0.1:62441 2024-11-16T08:34:49,913 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:34:49,919 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:34:49,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:348650x0, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T08:34:49,931 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34865-0x10142c840e70001 connected 2024-11-16T08:34:49,931 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:34865-0x10142c840e70001, quorum=127.0.0.1:62441, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:34:49,934 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T08:34:49,941 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T08:34:49,943 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34865-0x10142c840e70001, quorum=127.0.0.1:62441, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T08:34:49,947 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34865-0x10142c840e70001, quorum=127.0.0.1:62441, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T08:34:49,948 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34865 2024-11-16T08:34:49,949 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34865 2024-11-16T08:34:49,949 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34865 2024-11-16T08:34:49,950 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34865 2024-11-16T08:34:49,950 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34865 2024-11-16T08:34:49,964 DEBUG [M:0;c27dd56784bd:46533 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c27dd56784bd:46533 2024-11-16T08:34:49,964 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c27dd56784bd,46533,1731746089076 2024-11-16T08:34:49,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34865-0x10142c840e70001, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:34:49,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:34:49,984 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c27dd56784bd,46533,1731746089076 2024-11-16T08:34:50,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:34:50,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34865-0x10142c840e70001, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T08:34:50,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34865-0x10142c840e70001, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-16T08:34:50,015 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T08:34:50,017 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c27dd56784bd,46533,1731746089076 from backup master directory 2024-11-16T08:34:50,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34865-0x10142c840e70001, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:34:50,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c27dd56784bd,46533,1731746089076 2024-11-16T08:34:50,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:34:50,025 WARN [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T08:34:50,025 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c27dd56784bd,46533,1731746089076 2024-11-16T08:34:50,026 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-16T08:34:50,028 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-16T08:34:50,081 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/hbase.id] with ID: 254dc117-a069-41d0-839b-cb811ef0c897 2024-11-16T08:34:50,081 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/.tmp/hbase.id 2024-11-16T08:34:50,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741826_1002 (size=42) 2024-11-16T08:34:50,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741826_1002 (size=42) 2024-11-16T08:34:50,093 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/.tmp/hbase.id]:[hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/hbase.id] 2024-11-16T08:34:50,137 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:34:50,143 INFO 
[master/c27dd56784bd:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T08:34:50,160 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 2024-11-16T08:34:50,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34865-0x10142c840e70001, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:34:50,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:34:50,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741827_1003 (size=196) 2024-11-16T08:34:50,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741827_1003 (size=196) 2024-11-16T08:34:50,206 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T08:34:50,208 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T08:34:50,213 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:34:50,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741828_1004 (size=1189) 2024-11-16T08:34:50,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741828_1004 (size=1189) 2024-11-16T08:34:50,257 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store 2024-11-16T08:34:50,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741829_1005 (size=34) 2024-11-16T08:34:50,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741829_1005 (size=34) 2024-11-16T08:34:50,282 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-16T08:34:50,284 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:34:50,285 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T08:34:50,286 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:34:50,286 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:34:50,287 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T08:34:50,287 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
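The descriptor dumped above for the local 'master:store' region spells out each column family's settings (info: 3 versions, ROWCOL bloom filter, in-memory, ROW_INDEX_V1 encoding, 8 KB blocks; proc, rs and state: 1 version, ROW bloom filter, 64 KB blocks). For reference, an equivalent family can be expressed with the public client API; a hedged sketch of the 'info' family only, with a hypothetical class name, and not the code HBase itself uses to build master:store:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
        public static TableDescriptor build() {
            // Mirrors the 'info' family attributes printed in the descriptor above.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .build();
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("master", "store"))
                .setColumnFamily(info)
                .build();
        }
    }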
2024-11-16T08:34:50,287 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:34:50,288 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731746090285Disabling compacts and flushes for region at 1731746090285Disabling writes for close at 1731746090287 (+2 ms)Writing region close event to WAL at 1731746090287Closed at 1731746090287 2024-11-16T08:34:50,291 WARN [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/.initializing 2024-11-16T08:34:50,291 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/WALs/c27dd56784bd,46533,1731746089076 2024-11-16T08:34:50,311 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C46533%2C1731746089076, suffix=, logDir=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/WALs/c27dd56784bd,46533,1731746089076, archiveDir=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/oldWALs, maxLogs=10 2024-11-16T08:34:50,319 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C46533%2C1731746089076.1731746090315 2024-11-16T08:34:50,337 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/WALs/c27dd56784bd,46533,1731746089076/c27dd56784bd%2C46533%2C1731746089076.1731746090315 2024-11-16T08:34:50,346 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44653:44653),(127.0.0.1/127.0.0.1:34955:34955)] 2024-11-16T08:34:50,347 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T08:34:50,348 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:34:50,351 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:34:50,352 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:34:50,384 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:34:50,410 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T08:34:50,414 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:34:50,416 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:34:50,417 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:34:50,420 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T08:34:50,420 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:34:50,421 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:34:50,421 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:34:50,424 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T08:34:50,424 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:34:50,425 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:34:50,426 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:34:50,428 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T08:34:50,428 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:34:50,429 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:34:50,430 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:34:50,433 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:34:50,434 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:34:50,440 DEBUG [master/c27dd56784bd:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:34:50,441 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:34:50,444 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T08:34:50,447 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:34:50,451 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T08:34:50,452 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=836671, jitterRate=0.06388236582279205}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T08:34:50,459 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731746090363Initializing all the Stores at 1731746090365 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746090366 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746090366Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746090367 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746090367Cleaning up temporary data from old regions at 1731746090441 (+74 ms)Region opened successfully at 1731746090459 (+18 ms) 2024-11-16T08:34:50,460 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T08:34:50,489 DEBUG 
[master/c27dd56784bd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2830a5e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c27dd56784bd/172.17.0.3:0 2024-11-16T08:34:50,516 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T08:34:50,526 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T08:34:50,526 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T08:34:50,529 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T08:34:50,530 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-16T08:34:50,534 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 3 msec 2024-11-16T08:34:50,535 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T08:34:50,559 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T08:34:50,567 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T08:34:50,614 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T08:34:50,619 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T08:34:50,621 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T08:34:50,634 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T08:34:50,637 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T08:34:50,642 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T08:34:50,654 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T08:34:50,656 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T08:34:50,666 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T08:34:50,688 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T08:34:50,697 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T08:34:50,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T08:34:50,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34865-0x10142c840e70001, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T08:34:50,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:34:50,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34865-0x10142c840e70001, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:34:50,713 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c27dd56784bd,46533,1731746089076, sessionid=0x10142c840e70000, setting cluster-up flag (Was=false) 2024-11-16T08:34:50,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:34:50,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34865-0x10142c840e70001, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:34:50,771 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T08:34:50,775 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c27dd56784bd,46533,1731746089076 2024-11-16T08:34:50,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:34:50,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34865-0x10142c840e70001, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:34:50,834 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): 
Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T08:34:50,837 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c27dd56784bd,46533,1731746089076 2024-11-16T08:34:50,844 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T08:34:50,854 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(746): ClusterId : 254dc117-a069-41d0-839b-cb811ef0c897 2024-11-16T08:34:50,856 DEBUG [RS:0;c27dd56784bd:34865 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T08:34:50,868 DEBUG [RS:0;c27dd56784bd:34865 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T08:34:50,868 DEBUG [RS:0;c27dd56784bd:34865 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T08:34:50,878 DEBUG [RS:0;c27dd56784bd:34865 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T08:34:50,879 DEBUG [RS:0;c27dd56784bd:34865 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@579036d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c27dd56784bd/172.17.0.3:0 2024-11-16T08:34:50,897 DEBUG [RS:0;c27dd56784bd:34865 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c27dd56784bd:34865 2024-11-16T08:34:50,900 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T08:34:50,900 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T08:34:50,900 DEBUG [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(832): About to register with Master. 
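[editor's note] Several of the debug lines above are ZKWatcher callbacks firing on /hbase (NodeCreated for /hbase/running, NodeChildrenChanged for /hbase). A minimal sketch of the same pattern with the stock org.apache.zookeeper client follows; the quorum address is the ephemeral one from this test run and the sleep is only to keep the demo process alive, both assumptions of the sketch rather than anything HBase does verbatim.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class HBaseZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
      // Mirrors the "Received ZooKeeper Event, type=..., path=..." debug lines.
      System.out.println("type=" + event.getType() + ", path=" + event.getPath());
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:62441", 30_000, watcher);
    connected.await();
    zk.getChildren("/hbase", true);  // re-arm a children watch, as ZKWatcher does after each event
    Thread.sleep(10_000);            // keep the process alive long enough to observe events
    zk.close();
  }
}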
2024-11-16T08:34:50,903 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(2659): reportForDuty to master=c27dd56784bd,46533,1731746089076 with port=34865, startcode=1731746089868 2024-11-16T08:34:50,913 DEBUG [RS:0;c27dd56784bd:34865 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T08:34:50,915 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T08:34:50,923 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T08:34:50,929 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T08:34:50,936 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c27dd56784bd,46533,1731746089076 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T08:34:50,945 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:34:50,945 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:34:50,945 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:34:50,945 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:34:50,946 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c27dd56784bd:0, corePoolSize=10, maxPoolSize=10 2024-11-16T08:34:50,946 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:34:50,946 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c27dd56784bd:0, corePoolSize=2, maxPoolSize=2 2024-11-16T08:34:50,946 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:34:50,959 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] 
procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731746120959 2024-11-16T08:34:50,960 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:34:50,960 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T08:34:50,961 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T08:34:50,963 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T08:34:50,966 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T08:34:50,967 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T08:34:50,967 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T08:34:50,967 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T08:34:50,968 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:34:50,969 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T08:34:50,970 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
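[editor's note] The ChoreService lines above schedule named background tasks at fixed periods (for example LogsCleaner every 600000 ms). The sketch below is a plain-JDK analogue of that periodic-task pattern, not HBase's own ChoreService/ScheduledChore API; the task name and body are illustrative only.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChorePatternSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
    // Analogous to "Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled."
    chores.scheduleAtFixedRate(
        () -> System.out.println("LogsCleaner tick: scan oldWALs and delete expired files"),
        0, 600_000, TimeUnit.MILLISECONDS);
    TimeUnit.SECONDS.sleep(5);  // let the demo run briefly
    chores.shutdownNow();
  }
}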
2024-11-16T08:34:50,976 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T08:34:50,978 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T08:34:50,979 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T08:34:50,984 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T08:34:50,985 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T08:34:50,986 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49555, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T08:34:50,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741831_1007 (size=1321) 2024-11-16T08:34:50,987 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746090986,5,FailOnTimeoutGroup] 2024-11-16T08:34:50,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741831_1007 (size=1321) 2024-11-16T08:34:50,990 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T08:34:50,991 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY 
=> 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d 2024-11-16T08:34:50,995 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746090988,5,FailOnTimeoutGroup] 2024-11-16T08:34:50,995 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T08:34:50,995 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T08:34:50,997 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T08:34:50,997 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-16T08:34:50,994 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46533 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-16T08:34:51,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741832_1008 (size=32) 2024-11-16T08:34:51,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741832_1008 (size=32) 2024-11-16T08:34:51,021 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:34:51,023 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T08:34:51,026 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, 
major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T08:34:51,027 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:34:51,028 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:34:51,028 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T08:34:51,031 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T08:34:51,032 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:34:51,033 DEBUG [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-16T08:34:51,033 WARN [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
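[editor's note] The "reportForDuty failed; sleeping 100 ms and then retrying" warning above is the region server re-announcing itself until the master's RPC services accept it. A minimal sketch of that retry-with-backoff loop follows; reportForDuty() here is a hypothetical stand-in for the real HRegionServer RPC, and the random failure simulates the master not being ready yet.

import java.util.concurrent.TimeUnit;

public class ReportForDutyRetrySketch {
  // Hypothetical stand-in; the real call goes to RegionServerStatusService over RPC.
  static boolean reportForDuty() {
    return Math.random() > 0.7;  // pretend the master eventually starts accepting us
  }

  public static void main(String[] args) throws InterruptedException {
    long sleepMs = 100;  // matches the 100 ms back-off in the log
    while (!reportForDuty()) {
      System.out.println("reportForDuty failed; sleeping " + sleepMs + " ms and then retrying.");
      TimeUnit.MILLISECONDS.sleep(sleepMs);
    }
    System.out.println("registered with master");
  }
}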
2024-11-16T08:34:51,033 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:34:51,033 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T08:34:51,037 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T08:34:51,038 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:34:51,039 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:34:51,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T08:34:51,043 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T08:34:51,043 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:34:51,044 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:34:51,045 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T08:34:51,047 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740 2024-11-16T08:34:51,048 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740 2024-11-16T08:34:51,051 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T08:34:51,052 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T08:34:51,053 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T08:34:51,055 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T08:34:51,059 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T08:34:51,061 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=744101, jitterRate=-0.05382704734802246}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T08:34:51,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731746091021Initializing all the Stores at 1731746091023 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746091023Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746091023Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746091023Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746091023Cleaning up temporary data from old regions at 1731746091052 (+29 ms)Region opened successfully at 1731746091066 (+14 ms) 2024-11-16T08:34:51,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 
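[editor's note] The FlushLargeStoresPolicy debug lines above state the rule explicitly: with no hbase.hregion.percolumnfamilyflush.size.lower.bound set, the lower bound is the region's memstore flush size divided by the number of column families. A small sketch of that arithmetic, using the figures printed in this log, follows.

public class FlushLowerBoundSketch {
  static long lowerBound(long memStoreFlushSize, int numFamilies) {
    return memStoreFlushSize / numFamilies;
  }

  public static void main(String[] args) {
    // master:store: flushSize=134217728 (128 MB) across 4 families -> 33554432 (32 MB),
    // matching "flushSizeLowerBound=33554432" and "(32.0 M)" in the log.
    System.out.println(lowerBound(134_217_728L, 4));
    // hbase:meta logs flushSizeLowerBound=16777216 (16 MB) for its 4 families,
    // which is consistent with a 64 MB flush size for that region (assumed here).
    System.out.println(lowerBound(67_108_864L, 4));
  }
}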
2024-11-16T08:34:51,066 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T08:34:51,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T08:34:51,067 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T08:34:51,067 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T08:34:51,068 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T08:34:51,068 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731746091066Disabling compacts and flushes for region at 1731746091066Disabling writes for close at 1731746091067 (+1 ms)Writing region close event to WAL at 1731746091068 (+1 ms)Closed at 1731746091068 2024-11-16T08:34:51,072 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:34:51,072 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T08:34:51,079 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T08:34:51,089 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T08:34:51,092 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T08:34:51,135 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(2659): reportForDuty to master=c27dd56784bd,46533,1731746089076 with port=34865, startcode=1731746089868 2024-11-16T08:34:51,137 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46533 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c27dd56784bd,34865,1731746089868 2024-11-16T08:34:51,140 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46533 {}] master.ServerManager(517): Registering regionserver=c27dd56784bd,34865,1731746089868 2024-11-16T08:34:51,147 DEBUG [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d 2024-11-16T08:34:51,147 DEBUG [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46127 2024-11-16T08:34:51,147 DEBUG [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T08:34:51,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T08:34:51,162 DEBUG [RS:0;c27dd56784bd:34865 {}] zookeeper.ZKUtil(111): regionserver:34865-0x10142c840e70001, 
quorum=127.0.0.1:62441, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c27dd56784bd,34865,1731746089868 2024-11-16T08:34:51,162 WARN [RS:0;c27dd56784bd:34865 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T08:34:51,162 INFO [RS:0;c27dd56784bd:34865 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:34:51,162 DEBUG [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868 2024-11-16T08:34:51,165 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c27dd56784bd,34865,1731746089868] 2024-11-16T08:34:51,193 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T08:34:51,211 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T08:34:51,216 INFO [RS:0;c27dd56784bd:34865 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T08:34:51,217 INFO [RS:0;c27dd56784bd:34865 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:34:51,218 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T08:34:51,223 INFO [RS:0;c27dd56784bd:34865 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T08:34:51,225 INFO [RS:0;c27dd56784bd:34865 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
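[editor's note] The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. That low mark is the global limit scaled by a lower-limit fraction, 0.95 by default (the hbase.regionserver.global.memstore.size.lower.limit setting, if I recall the key correctly); the sketch below just checks the arithmetic against the logged numbers.

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    long globalLimitMb = 880;          // "globalMemStoreLimit=880 M" from the log
    double lowerLimitFraction = 0.95;  // assumed default fraction
    long lowMarkMb = (long) (globalLimitMb * lowerLimitFraction);
    System.out.println(lowMarkMb);     // 836, matching "globalMemStoreLimitLowMark=836 M"
  }
}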
2024-11-16T08:34:51,226 DEBUG [RS:0;c27dd56784bd:34865 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:34:51,226 DEBUG [RS:0;c27dd56784bd:34865 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:34:51,226 DEBUG [RS:0;c27dd56784bd:34865 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:34:51,226 DEBUG [RS:0;c27dd56784bd:34865 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:34:51,227 DEBUG [RS:0;c27dd56784bd:34865 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:34:51,227 DEBUG [RS:0;c27dd56784bd:34865 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c27dd56784bd:0, corePoolSize=2, maxPoolSize=2 2024-11-16T08:34:51,227 DEBUG [RS:0;c27dd56784bd:34865 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:34:51,227 DEBUG [RS:0;c27dd56784bd:34865 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:34:51,227 DEBUG [RS:0;c27dd56784bd:34865 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:34:51,227 DEBUG [RS:0;c27dd56784bd:34865 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:34:51,227 DEBUG [RS:0;c27dd56784bd:34865 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:34:51,228 DEBUG [RS:0;c27dd56784bd:34865 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:34:51,228 DEBUG [RS:0;c27dd56784bd:34865 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c27dd56784bd:0, corePoolSize=3, maxPoolSize=3 2024-11-16T08:34:51,228 DEBUG [RS:0;c27dd56784bd:34865 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0, corePoolSize=3, maxPoolSize=3 2024-11-16T08:34:51,229 INFO [RS:0;c27dd56784bd:34865 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T08:34:51,229 INFO [RS:0;c27dd56784bd:34865 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T08:34:51,230 INFO [RS:0;c27dd56784bd:34865 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:34:51,230 INFO [RS:0;c27dd56784bd:34865 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
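[editor's note] Each executor.ExecutorService line above names a worker pool with a corePoolSize and maxPoolSize (for example RS_SNAPSHOT_OPERATIONS at 3/3). The sketch below uses a plain-JDK ThreadPoolExecutor to show what those two knobs mean; HBase wraps its own ExecutorService class around a similar pool, so treat this as an analogue rather than the real wiring.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class RegionServerPoolSketch {
  public static void main(String[] args) {
    ThreadPoolExecutor snapshotOps = new ThreadPoolExecutor(
        3,                      // corePoolSize=3, as logged for RS_SNAPSHOT_OPERATIONS
        3,                      // maxPoolSize=3
        60, TimeUnit.SECONDS,   // idle keep-alive (assumed value for the sketch)
        new LinkedBlockingQueue<>());
    snapshotOps.submit(() -> System.out.println("snapshot operation running"));
    snapshotOps.shutdown();
  }
}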
2024-11-16T08:34:51,230 INFO [RS:0;c27dd56784bd:34865 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T08:34:51,230 INFO [RS:0;c27dd56784bd:34865 {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,34865,1731746089868-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T08:34:51,243 WARN [c27dd56784bd:46533 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-16T08:34:51,255 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T08:34:51,257 INFO [RS:0;c27dd56784bd:34865 {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,34865,1731746089868-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:34:51,258 INFO [RS:0;c27dd56784bd:34865 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:34:51,258 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.Replication(171): c27dd56784bd,34865,1731746089868 started 2024-11-16T08:34:51,283 INFO [RS:0;c27dd56784bd:34865 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:34:51,283 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(1482): Serving as c27dd56784bd,34865,1731746089868, RpcServer on c27dd56784bd/172.17.0.3:34865, sessionid=0x10142c840e70001 2024-11-16T08:34:51,284 DEBUG [RS:0;c27dd56784bd:34865 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T08:34:51,284 DEBUG [RS:0;c27dd56784bd:34865 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c27dd56784bd,34865,1731746089868 2024-11-16T08:34:51,285 DEBUG [RS:0;c27dd56784bd:34865 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c27dd56784bd,34865,1731746089868' 2024-11-16T08:34:51,285 DEBUG [RS:0;c27dd56784bd:34865 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T08:34:51,286 DEBUG [RS:0;c27dd56784bd:34865 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T08:34:51,287 DEBUG [RS:0;c27dd56784bd:34865 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T08:34:51,287 DEBUG [RS:0;c27dd56784bd:34865 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T08:34:51,287 DEBUG [RS:0;c27dd56784bd:34865 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c27dd56784bd,34865,1731746089868 2024-11-16T08:34:51,287 DEBUG [RS:0;c27dd56784bd:34865 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c27dd56784bd,34865,1731746089868' 2024-11-16T08:34:51,287 DEBUG [RS:0;c27dd56784bd:34865 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T08:34:51,288 DEBUG [RS:0;c27dd56784bd:34865 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T08:34:51,289 DEBUG [RS:0;c27dd56784bd:34865 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T08:34:51,289 INFO [RS:0;c27dd56784bd:34865 {}] 
quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T08:34:51,289 INFO [RS:0;c27dd56784bd:34865 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T08:34:51,404 INFO [RS:0;c27dd56784bd:34865 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C34865%2C1731746089868, suffix=, logDir=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868, archiveDir=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/oldWALs, maxLogs=32 2024-11-16T08:34:51,407 INFO [RS:0;c27dd56784bd:34865 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C34865%2C1731746089868.1731746091407 2024-11-16T08:34:51,416 INFO [RS:0;c27dd56784bd:34865 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746091407 2024-11-16T08:34:51,420 DEBUG [RS:0;c27dd56784bd:34865 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44653:44653),(127.0.0.1/127.0.0.1:34955:34955)] 2024-11-16T08:34:51,499 DEBUG [c27dd56784bd:46533 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T08:34:51,513 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c27dd56784bd,34865,1731746089868 2024-11-16T08:34:51,518 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c27dd56784bd,34865,1731746089868, state=OPENING 2024-11-16T08:34:51,727 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T08:34:51,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:34:51,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34865-0x10142c840e70001, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:34:51,741 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:34:51,741 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:34:51,743 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T08:34:51,745 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c27dd56784bd,34865,1731746089868}] 2024-11-16T08:34:51,919 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T08:34:51,923 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.3:34573, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T08:34:51,936 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T08:34:51,937 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:34:51,941 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C34865%2C1731746089868.meta, suffix=.meta, logDir=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868, archiveDir=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/oldWALs, maxLogs=32 2024-11-16T08:34:51,943 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C34865%2C1731746089868.meta.1731746091943.meta 2024-11-16T08:34:51,952 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.meta.1731746091943.meta 2024-11-16T08:34:51,956 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34955:34955),(127.0.0.1/127.0.0.1:44653:44653)] 2024-11-16T08:34:51,959 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T08:34:51,961 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T08:34:51,963 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T08:34:51,968 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-16T08:34:51,972 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T08:34:51,973 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:34:51,973 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T08:34:51,973 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T08:34:51,976 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T08:34:51,978 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T08:34:51,978 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:34:51,979 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:34:51,980 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T08:34:51,981 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T08:34:51,982 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:34:51,982 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:34:51,983 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T08:34:51,985 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T08:34:51,985 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:34:51,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:34:51,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T08:34:51,988 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T08:34:51,989 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:34:51,990 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-16T08:34:51,990 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T08:34:51,992 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740 2024-11-16T08:34:51,995 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740 2024-11-16T08:34:51,997 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T08:34:51,998 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T08:34:51,999 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T08:34:52,002 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T08:34:52,004 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=759220, jitterRate=-0.034602269530296326}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T08:34:52,005 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T08:34:52,006 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731746091974Writing region info on filesystem at 1731746091974Initializing all the Stores at 1731746091976 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746091976Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746091976Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746091976Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746091976Cleaning up temporary data from old regions at 1731746091998 (+22 ms)Running coprocessor post-open hooks at 1731746092005 (+7 ms)Region opened successfully at 1731746092006 (+1 ms) 2024-11-16T08:34:52,013 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731746091911 2024-11-16T08:34:52,025 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T08:34:52,026 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T08:34:52,028 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c27dd56784bd,34865,1731746089868 2024-11-16T08:34:52,032 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c27dd56784bd,34865,1731746089868, state=OPEN 2024-11-16T08:34:52,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34865-0x10142c840e70001, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T08:34:52,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T08:34:52,120 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:34:52,120 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:34:52,120 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c27dd56784bd,34865,1731746089868 2024-11-16T08:34:52,126 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T08:34:52,127 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c27dd56784bd,34865,1731746089868 in 375 msec 2024-11-16T08:34:52,137 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T08:34:52,137 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.0510 sec 2024-11-16T08:34:52,139 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:34:52,139 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T08:34:52,164 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T08:34:52,165 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c27dd56784bd,34865,1731746089868, seqNum=-1] 2024-11-16T08:34:52,191 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T08:34:52,194 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49279, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T08:34:52,214 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.3380 sec 2024-11-16T08:34:52,214 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731746092214, completionTime=-1 2024-11-16T08:34:52,217 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T08:34:52,217 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T08:34:52,243 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T08:34:52,243 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731746152243 2024-11-16T08:34:52,243 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731746212243 2024-11-16T08:34:52,243 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 25 msec 2024-11-16T08:34:52,246 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,46533,1731746089076-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:34:52,246 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,46533,1731746089076-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:34:52,246 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,46533,1731746089076-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:34:52,247 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c27dd56784bd:46533, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T08:34:52,248 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T08:34:52,248 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T08:34:52,256 DEBUG [master/c27dd56784bd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T08:34:52,275 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.250sec 2024-11-16T08:34:52,277 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T08:34:52,278 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T08:34:52,279 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T08:34:52,280 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T08:34:52,280 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T08:34:52,280 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,46533,1731746089076-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T08:34:52,281 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,46533,1731746089076-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T08:34:52,290 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T08:34:52,291 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T08:34:52,292 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,46533,1731746089076-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T08:34:52,364 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66cb686a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:34:52,366 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-16T08:34:52,366 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-16T08:34:52,370 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c27dd56784bd,46533,-1 for getting cluster id 2024-11-16T08:34:52,374 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T08:34:52,383 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '254dc117-a069-41d0-839b-cb811ef0c897' 2024-11-16T08:34:52,385 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T08:34:52,385 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "254dc117-a069-41d0-839b-cb811ef0c897" 2024-11-16T08:34:52,388 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51d365bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:34:52,388 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c27dd56784bd,46533,-1] 2024-11-16T08:34:52,390 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T08:34:52,392 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:34:52,394 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42262, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T08:34:52,397 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32fb6022, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:34:52,398 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T08:34:52,406 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c27dd56784bd,34865,1731746089868, seqNum=-1] 2024-11-16T08:34:52,406 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T08:34:52,409 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34794, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T08:34:52,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=c27dd56784bd,46533,1731746089076 2024-11-16T08:34:52,431 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:34:52,442 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T08:34:52,448 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T08:34:52,454 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is c27dd56784bd,46533,1731746089076 2024-11-16T08:34:52,458 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@65d48f8e 2024-11-16T08:34:52,459 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T08:34:52,465 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42278, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T08:34:52,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46533 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T08:34:52,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46533 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-16T08:34:52,473 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46533 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T08:34:52,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46533 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-16T08:34:52,488 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T08:34:52,491 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46533 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-16T08:34:52,491 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:34:52,494 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T08:34:52,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46533 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T08:34:52,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741835_1011 (size=389) 2024-11-16T08:34:52,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741835_1011 (size=389) 2024-11-16T08:34:52,563 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f45a6132913f1e9ffa1461d6d2bb317f, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d 2024-11-16T08:34:52,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741836_1012 (size=72) 2024-11-16T08:34:52,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741836_1012 (size=72) 2024-11-16T08:34:52,584 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:34:52,584 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing f45a6132913f1e9ffa1461d6d2bb317f, disabling compactions & flushes 2024-11-16T08:34:52,585 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f. 2024-11-16T08:34:52,585 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f. 2024-11-16T08:34:52,585 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f. after waiting 0 ms 2024-11-16T08:34:52,585 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f. 2024-11-16T08:34:52,585 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f. 2024-11-16T08:34:52,585 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for f45a6132913f1e9ffa1461d6d2bb317f: Waiting for close lock at 1731746092584Disabling compacts and flushes for region at 1731746092584Disabling writes for close at 1731746092585 (+1 ms)Writing region close event to WAL at 1731746092585Closed at 1731746092585 2024-11-16T08:34:52,588 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T08:34:52,593 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731746092588"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731746092588"}]},"ts":"1731746092588"} 2024-11-16T08:34:52,599 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-16T08:34:52,602 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T08:34:52,606 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731746092602"}]},"ts":"1731746092602"} 2024-11-16T08:34:52,612 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-16T08:34:52,614 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=f45a6132913f1e9ffa1461d6d2bb317f, ASSIGN}] 2024-11-16T08:34:52,617 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=f45a6132913f1e9ffa1461d6d2bb317f, ASSIGN 2024-11-16T08:34:52,619 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=f45a6132913f1e9ffa1461d6d2bb317f, ASSIGN; state=OFFLINE, location=c27dd56784bd,34865,1731746089868; forceNewPlan=false, retain=false 2024-11-16T08:34:52,771 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f45a6132913f1e9ffa1461d6d2bb317f, regionState=OPENING, regionLocation=c27dd56784bd,34865,1731746089868 2024-11-16T08:34:52,777 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=f45a6132913f1e9ffa1461d6d2bb317f, ASSIGN because future has completed 2024-11-16T08:34:52,778 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f45a6132913f1e9ffa1461d6d2bb317f, server=c27dd56784bd,34865,1731746089868}] 2024-11-16T08:34:52,941 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f. 
2024-11-16T08:34:52,941 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => f45a6132913f1e9ffa1461d6d2bb317f, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f.', STARTKEY => '', ENDKEY => ''} 2024-11-16T08:34:52,942 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling f45a6132913f1e9ffa1461d6d2bb317f 2024-11-16T08:34:52,942 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:34:52,942 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for f45a6132913f1e9ffa1461d6d2bb317f 2024-11-16T08:34:52,942 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for f45a6132913f1e9ffa1461d6d2bb317f 2024-11-16T08:34:52,945 INFO [StoreOpener-f45a6132913f1e9ffa1461d6d2bb317f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f45a6132913f1e9ffa1461d6d2bb317f 2024-11-16T08:34:52,947 INFO [StoreOpener-f45a6132913f1e9ffa1461d6d2bb317f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f45a6132913f1e9ffa1461d6d2bb317f columnFamilyName info 2024-11-16T08:34:52,948 DEBUG [StoreOpener-f45a6132913f1e9ffa1461d6d2bb317f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:34:52,949 INFO [StoreOpener-f45a6132913f1e9ffa1461d6d2bb317f-1 {}] regionserver.HStore(327): Store=f45a6132913f1e9ffa1461d6d2bb317f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:34:52,949 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for f45a6132913f1e9ffa1461d6d2bb317f 2024-11-16T08:34:52,950 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f 2024-11-16T08:34:52,951 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f 2024-11-16T08:34:52,952 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for f45a6132913f1e9ffa1461d6d2bb317f 2024-11-16T08:34:52,952 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for f45a6132913f1e9ffa1461d6d2bb317f 2024-11-16T08:34:52,956 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for f45a6132913f1e9ffa1461d6d2bb317f 2024-11-16T08:34:52,959 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T08:34:52,960 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened f45a6132913f1e9ffa1461d6d2bb317f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=778909, jitterRate=-0.009567156434059143}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T08:34:52,960 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f45a6132913f1e9ffa1461d6d2bb317f 2024-11-16T08:34:52,962 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for f45a6132913f1e9ffa1461d6d2bb317f: Running coprocessor pre-open hook at 1731746092942Writing region info on filesystem at 1731746092942Initializing all the Stores at 1731746092944 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746092944Cleaning up temporary data from old regions at 1731746092952 (+8 ms)Running coprocessor post-open hooks at 1731746092960 (+8 ms)Region opened successfully at 1731746092961 (+1 ms) 2024-11-16T08:34:52,964 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f., pid=6, masterSystemTime=1731746092934 2024-11-16T08:34:52,968 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f. 2024-11-16T08:34:52,968 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f. 2024-11-16T08:34:52,969 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f45a6132913f1e9ffa1461d6d2bb317f, regionState=OPEN, openSeqNum=2, regionLocation=c27dd56784bd,34865,1731746089868 2024-11-16T08:34:52,974 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f45a6132913f1e9ffa1461d6d2bb317f, server=c27dd56784bd,34865,1731746089868 because future has completed 2024-11-16T08:34:52,981 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T08:34:52,982 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure f45a6132913f1e9ffa1461d6d2bb317f, server=c27dd56784bd,34865,1731746089868 in 198 msec 2024-11-16T08:34:52,988 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T08:34:52,988 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=f45a6132913f1e9ffa1461d6d2bb317f, ASSIGN in 368 msec 2024-11-16T08:34:52,990 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T08:34:52,990 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731746092990"}]},"ts":"1731746092990"} 2024-11-16T08:34:52,996 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-16T08:34:52,998 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T08:34:53,003 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 520 msec 2024-11-16T08:34:57,505 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-16T08:34:57,553 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T08:34:57,554 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-16T08:34:59,420 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T08:34:59,421 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T08:34:59,425 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-16T08:34:59,425 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-16T08:34:59,426 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T08:34:59,426 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T08:34:59,426 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T08:34:59,426 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-16T08:35:02,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46533 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T08:35:02,594 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-16T08:35:02,601 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-16T08:35:02,608 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-16T08:35:02,609 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f. 
2024-11-16T08:35:02,610 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C34865%2C1731746089868.1731746102610 2024-11-16T08:35:02,672 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:35:02,673 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:35:02,673 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:35:02,673 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:35:02,673 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:35:02,674 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746091407 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746102610 2024-11-16T08:35:02,675 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44653:44653),(127.0.0.1/127.0.0.1:34955:34955)] 2024-11-16T08:35:02,675 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746091407 is not closed yet, will try archiving it next time 2024-11-16T08:35:02,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741833_1009 (size=451) 2024-11-16T08:35:02,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741833_1009 (size=451) 2024-11-16T08:35:02,678 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746091407 to hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/oldWALs/c27dd56784bd%2C34865%2C1731746089868.1731746091407 2024-11-16T08:35:02,684 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f., hostname=c27dd56784bd,34865,1731746089868, seqNum=2] 2024-11-16T08:35:14,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34865 {}] regionserver.HRegion(8855): Flush requested on f45a6132913f1e9ffa1461d6d2bb317f 2024-11-16T08:35:14,737 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f45a6132913f1e9ffa1461d6d2bb317f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T08:35:14,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/.tmp/info/59ac1ec19a9e4a1b8f7aaccf085cddec is 1080, key is row0001/info:/1731746102686/Put/seqid=0 2024-11-16T08:35:14,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741838_1014 (size=12509) 2024-11-16T08:35:14,842 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741838_1014 (size=12509) 2024-11-16T08:35:14,844 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/.tmp/info/59ac1ec19a9e4a1b8f7aaccf085cddec 2024-11-16T08:35:14,901 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/.tmp/info/59ac1ec19a9e4a1b8f7aaccf085cddec as hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/59ac1ec19a9e4a1b8f7aaccf085cddec 2024-11-16T08:35:14,913 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/59ac1ec19a9e4a1b8f7aaccf085cddec, entries=7, sequenceid=11, filesize=12.2 K 2024-11-16T08:35:14,919 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for f45a6132913f1e9ffa1461d6d2bb317f in 184ms, sequenceid=11, compaction requested=false 2024-11-16T08:35:14,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f45a6132913f1e9ffa1461d6d2bb317f: 2024-11-16T08:35:18,199 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
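The flush above was initiated by the region server's MemStoreFlusher once the memstore for f45a6132913f1e9ffa1461d6d2bb317f reached the flush size. The same operation can be requested from a client (for example in a test); a small sketch follows, assuming an already-open Admin handle — the helper class and method names are illustrative only.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

final class FlushSketch {
  // 'admin' is assumed to come from an existing Connection (see the earlier sketch).
  static void forceFlush(Admin admin) throws IOException {
    // Flush the table's memstores to HFiles -- the client-side counterpart of the
    // MemStoreFlusher-driven flush recorded above.
    admin.flush(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"));
  }
}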
2024-11-16T08:35:22,748 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C34865%2C1731746089868.1731746122747 2024-11-16T08:35:22,958 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 208 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK], DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK]] 2024-11-16T08:35:22,958 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:35:22,959 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:35:22,959 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:35:22,959 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:35:22,959 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:35:22,959 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746102610 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746122747 2024-11-16T08:35:22,960 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34955:34955),(127.0.0.1/127.0.0.1:44653:44653)] 2024-11-16T08:35:22,960 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746102610 is not closed yet, will try archiving it next time 2024-11-16T08:35:22,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741837_1013 (size=12399) 2024-11-16T08:35:22,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741837_1013 (size=12399) 2024-11-16T08:35:23,163 INFO [FSHLog-0-hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d-prefix:c27dd56784bd,34865,1731746089868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK], DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK]] 2024-11-16T08:35:25,367 INFO [FSHLog-0-hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d-prefix:c27dd56784bd,34865,1731746089868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK], DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK]] 2024-11-16T08:35:27,572 INFO [FSHLog-0-hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d-prefix:c27dd56784bd,34865,1731746089868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK], DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK]] 2024-11-16T08:35:29,775 INFO [FSHLog-0-hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d-prefix:c27dd56784bd,34865,1731746089868 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK], DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK]] 2024-11-16T08:35:29,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34865 {}] regionserver.HRegion(8855): Flush requested on f45a6132913f1e9ffa1461d6d2bb317f 2024-11-16T08:35:29,776 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f45a6132913f1e9ffa1461d6d2bb317f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T08:35:29,978 INFO [FSHLog-0-hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d-prefix:c27dd56784bd,34865,1731746089868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK], DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK]] 2024-11-16T08:35:29,984 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/.tmp/info/09a104b4bd034f4b88126e0fd8e6ce02 is 1080, key is row0008/info:/1731746116735/Put/seqid=0 2024-11-16T08:35:29,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741840_1016 (size=12509) 2024-11-16T08:35:29,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741840_1016 (size=12509) 2024-11-16T08:35:29,994 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/.tmp/info/09a104b4bd034f4b88126e0fd8e6ce02 2024-11-16T08:35:30,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/.tmp/info/09a104b4bd034f4b88126e0fd8e6ce02 as hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/09a104b4bd034f4b88126e0fd8e6ce02 2024-11-16T08:35:30,020 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/09a104b4bd034f4b88126e0fd8e6ce02, entries=7, sequenceid=21, filesize=12.2 K 2024-11-16T08:35:30,222 INFO [FSHLog-0-hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d-prefix:c27dd56784bd,34865,1731746089868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK], DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK]] 2024-11-16T08:35:30,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for f45a6132913f1e9ffa1461d6d2bb317f in 
446ms, sequenceid=21, compaction requested=false 2024-11-16T08:35:30,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f45a6132913f1e9ffa1461d6d2bb317f: 2024-11-16T08:35:30,222 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-16T08:35:30,222 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:35:30,223 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/59ac1ec19a9e4a1b8f7aaccf085cddec because midkey is the same as first or last row 2024-11-16T08:35:31,979 INFO [FSHLog-0-hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d-prefix:c27dd56784bd,34865,1731746089868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK], DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK]] 2024-11-16T08:35:32,294 INFO [master/c27dd56784bd:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T08:35:32,294 INFO [master/c27dd56784bd:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-16T08:35:34,183 INFO [FSHLog-0-hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d-prefix:c27dd56784bd,34865,1731746089868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK], DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK]] 2024-11-16T08:35:34,185 WARN [FSHLog-0-hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d-prefix:c27dd56784bd,34865,1731746089868 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK], DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK]] 2024-11-16T08:35:34,186 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c27dd56784bd%2C34865%2C1731746089868:(num 1731746122747) roll requested 2024-11-16T08:35:34,186 INFO [regionserver/c27dd56784bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C34865%2C1731746089868.1731746134186 2024-11-16T08:35:34,393 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK], DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK]] 2024-11-16T08:35:34,394 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:35:34,394 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:35:34,394 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:35:34,394 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:35:34,394 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
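The roll requested above was triggered automatically because eight consecutive syncs exceeded the slow-sync threshold (count=8, threshold=5). A WAL roll can also be requested explicitly through the Admin API; the sketch below assumes an open Admin handle and the ServerName of the region server seen in the log (c27dd56784bd,34865,1731746089868), and the helper class is illustrative only.

import java.io.IOException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;

final class WalRollSketch {
  // Ask one region server to roll its write-ahead log, the same action the
  // log roller performs after the slow-sync threshold is exceeded.
  static void rollWal(Admin admin, ServerName regionServer) throws IOException {
    admin.rollWALWriter(regionServer);
  }
}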
2024-11-16T08:35:34,394 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746122747 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746134186 2024-11-16T08:35:34,397 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44653:44653),(127.0.0.1/127.0.0.1:34955:34955)] 2024-11-16T08:35:34,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741839_1015 (size=7739) 2024-11-16T08:35:34,397 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746122747 is not closed yet, will try archiving it next time 2024-11-16T08:35:34,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741839_1015 (size=7739) 2024-11-16T08:35:34,397 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746102610 to hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/oldWALs/c27dd56784bd%2C34865%2C1731746089868.1731746102610 2024-11-16T08:35:36,387 INFO [FSHLog-0-hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d-prefix:c27dd56784bd,34865,1731746089868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK], DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK]] 2024-11-16T08:35:37,942 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f45a6132913f1e9ffa1461d6d2bb317f, had cached 0 bytes from a total of 25018 2024-11-16T08:35:38,591 INFO [FSHLog-0-hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d-prefix:c27dd56784bd,34865,1731746089868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK], DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK]] 2024-11-16T08:35:40,795 INFO [FSHLog-0-hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d-prefix:c27dd56784bd,34865,1731746089868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK], DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK]] 2024-11-16T08:35:42,999 INFO [FSHLog-0-hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d-prefix:c27dd56784bd,34865,1731746089868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK], 
DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK]] 2024-11-16T08:35:45,003 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T08:35:45,005 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C34865%2C1731746089868.1731746145004 2024-11-16T08:35:48,199 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T08:35:50,020 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5008 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK], DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK]] 2024-11-16T08:35:50,023 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5008 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK], DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK]] 2024-11-16T08:35:50,023 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c27dd56784bd%2C34865%2C1731746089868:(num 1731746145004) roll requested 2024-11-16T08:35:50,024 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:35:50,024 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:35:50,024 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:35:50,024 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:35:50,025 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:35:50,025 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746134186 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746145004 2024-11-16T08:35:50,029 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44653:44653),(127.0.0.1/127.0.0.1:34955:34955)] 2024-11-16T08:35:50,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741841_1017 (size=4753) 2024-11-16T08:35:50,029 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746134186 is not closed yet, will try archiving it next time 2024-11-16T08:35:50,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741841_1017 (size=4753) 2024-11-16T08:35:50,029 INFO [regionserver/c27dd56784bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C34865%2C1731746089868.1731746150029 2024-11-16T08:35:55,034 INFO [FSHLog-0-hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d-prefix:c27dd56784bd,34865,1731746089868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK], DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK]] 2024-11-16T08:35:55,034 WARN [FSHLog-0-hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d-prefix:c27dd56784bd,34865,1731746089868 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK], DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK]] 2024-11-16T08:35:55,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34865 {}] regionserver.HRegion(8855): Flush requested on f45a6132913f1e9ffa1461d6d2bb317f 2024-11-16T08:35:55,035 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f45a6132913f1e9ffa1461d6d2bb317f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T08:35:55,044 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5009 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK], DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK]] 2024-11-16T08:35:55,044 WARN [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5009 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK], DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK]] 2024-11-16T08:35:57,037 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T08:36:00,038 INFO [FSHLog-0-hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d-prefix:c27dd56784bd,34865,1731746089868 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK], DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK]] 2024-11-16T08:36:00,038 WARN [FSHLog-0-hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d-prefix:c27dd56784bd,34865,1731746089868 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44245,DS-690cbe74-3bb2-47cb-8834-d353bcd5ef16,DISK], DatanodeInfoWithStorage[127.0.0.1:44307,DS-a6fb0d9c-84d3-4abf-a856-a5b29ef5aee5,DISK]] 2024-11-16T08:36:00,038 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:00,038 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:00,039 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:00,039 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:00,039 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:00,039 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746145004 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746150029 2024-11-16T08:36:00,040 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44653:44653),(127.0.0.1/127.0.0.1:34955:34955)] 2024-11-16T08:36:00,040 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746145004 is not closed yet, will try archiving it next time 2024-11-16T08:36:00,040 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c27dd56784bd%2C34865%2C1731746089868:(num 1731746160040) roll requested 2024-11-16T08:36:00,041 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C34865%2C1731746089868.1731746160040 2024-11-16T08:36:00,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741842_1018 (size=1569) 2024-11-16T08:36:00,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741842_1018 (size=1569) 2024-11-16T08:36:00,044 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/.tmp/info/d92391005fb64ac6ae8de023a22905f0 is 1080, key is row0015/info:/1731746131778/Put/seqid=0 2024-11-16T08:36:00,055 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:00,056 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:00,056 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:00,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741844_1020 (size=12509) 2024-11-16T08:36:00,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741844_1020 (size=12509) 2024-11-16T08:36:00,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/.tmp/info/d92391005fb64ac6ae8de023a22905f0 2024-11-16T08:36:00,065 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:00,065 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:00,065 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746150029 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746160040 2024-11-16T08:36:00,066 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34955:34955),(127.0.0.1/127.0.0.1:44653:44653)] 2024-11-16T08:36:00,066 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746150029 is not closed yet, will try archiving it next time 2024-11-16T08:36:00,067 INFO [regionserver/c27dd56784bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C34865%2C1731746089868.1731746160066 2024-11-16T08:36:00,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741843_1019 (size=93) 2024-11-16T08:36:00,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741843_1019 (size=93) 2024-11-16T08:36:00,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/.tmp/info/d92391005fb64ac6ae8de023a22905f0 as hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/d92391005fb64ac6ae8de023a22905f0 2024-11-16T08:36:00,080 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:00,080 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:00,080 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:00,081 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:00,081 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/d92391005fb64ac6ae8de023a22905f0, entries=7, sequenceid=31, filesize=12.2 K 2024-11-16T08:36:00,081 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:00,081 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746160040 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746160066 2024-11-16T08:36:00,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741845_1021 (size=1258) 2024-11-16T08:36:00,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741845_1021 (size=1258) 2024-11-16T08:36:00,084 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746150029 is not closed yet, will try archiving it next time 2024-11-16T08:36:00,088 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34955:34955),(127.0.0.1/127.0.0.1:44653:44653)] 2024-11-16T08:36:00,088 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746150029 is not closed yet, will try archiving it next time 2024-11-16T08:36:00,089 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=1.05 KB/1076 for f45a6132913f1e9ffa1461d6d2bb317f in 5054ms, sequenceid=31, compaction requested=true 2024-11-16T08:36:00,089 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f45a6132913f1e9ffa1461d6d2bb317f: 2024-11-16T08:36:00,089 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-16T08:36:00,089 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:36:00,089 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/59ac1ec19a9e4a1b8f7aaccf085cddec because midkey is the same as first or last row 2024-11-16T08:36:00,091 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f45a6132913f1e9ffa1461d6d2bb317f:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T08:36:00,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:36:00,093 DEBUG [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T08:36:00,096 DEBUG [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T08:36:00,097 DEBUG [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] regionserver.HStore(1541): f45a6132913f1e9ffa1461d6d2bb317f/info is initiating minor compaction (all files) 2024-11-16T08:36:00,098 INFO [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f45a6132913f1e9ffa1461d6d2bb317f/info in TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f. 
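The minor compaction above was selected automatically by the ExploringCompactionPolicy once three store files of roughly 12.2 K each had accumulated. Compaction can also be requested and monitored from the client side; a brief sketch, again assuming an open Admin handle (class and method names are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;

final class CompactionSketch {
  static void compactAndWait(Admin admin) throws IOException, InterruptedException {
    TableName table = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    admin.compact(table); // request a (minor) compaction like the one logged above
    // Poll until the servers report no compaction in progress for the table.
    while (admin.getCompactionState(table) != CompactionState.NONE) {
      Thread.sleep(100);
    }
  }
}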
2024-11-16T08:36:00,098 INFO [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/59ac1ec19a9e4a1b8f7aaccf085cddec, hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/09a104b4bd034f4b88126e0fd8e6ce02, hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/d92391005fb64ac6ae8de023a22905f0] into tmpdir=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/.tmp, totalSize=36.6 K 2024-11-16T08:36:00,100 DEBUG [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] compactions.Compactor(225): Compacting 59ac1ec19a9e4a1b8f7aaccf085cddec, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731746102686 2024-11-16T08:36:00,100 DEBUG [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] compactions.Compactor(225): Compacting 09a104b4bd034f4b88126e0fd8e6ce02, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731746116735 2024-11-16T08:36:00,101 DEBUG [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] compactions.Compactor(225): Compacting d92391005fb64ac6ae8de023a22905f0, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731746131778 2024-11-16T08:36:00,130 INFO [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f45a6132913f1e9ffa1461d6d2bb317f#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T08:36:00,131 DEBUG [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/.tmp/info/a35669ecbffc440e89fac197a459ffa1 is 1080, key is row0001/info:/1731746102686/Put/seqid=0 2024-11-16T08:36:00,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741847_1023 (size=27710) 2024-11-16T08:36:00,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741847_1023 (size=27710) 2024-11-16T08:36:00,148 DEBUG [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/.tmp/info/a35669ecbffc440e89fac197a459ffa1 as hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/a35669ecbffc440e89fac197a459ffa1 2024-11-16T08:36:00,163 INFO [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f45a6132913f1e9ffa1461d6d2bb317f/info of f45a6132913f1e9ffa1461d6d2bb317f into a35669ecbffc440e89fac197a459ffa1(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T08:36:00,163 DEBUG [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f45a6132913f1e9ffa1461d6d2bb317f: 2024-11-16T08:36:00,165 INFO [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f., storeName=f45a6132913f1e9ffa1461d6d2bb317f/info, priority=13, startTime=1731746160090; duration=0sec 2024-11-16T08:36:00,165 DEBUG [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-16T08:36:00,165 DEBUG [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:36:00,165 DEBUG [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/a35669ecbffc440e89fac197a459ffa1 because midkey is the same as first or last row 2024-11-16T08:36:00,166 DEBUG [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-16T08:36:00,166 DEBUG [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:36:00,166 DEBUG [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/a35669ecbffc440e89fac197a459ffa1 because midkey is the same as first or last row 2024-11-16T08:36:00,166 DEBUG [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-16T08:36:00,166 DEBUG [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:36:00,166 DEBUG [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/a35669ecbffc440e89fac197a459ffa1 because midkey is the same as first or last row 2024-11-16T08:36:00,166 DEBUG [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:36:00,166 DEBUG [RS:0;c27dd56784bd:34865-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f45a6132913f1e9ffa1461d6d2bb317f:info 2024-11-16T08:36:00,470 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746122747 to hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/oldWALs/c27dd56784bd%2C34865%2C1731746089868.1731746122747 2024-11-16T08:36:00,472 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746134186 to hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/oldWALs/c27dd56784bd%2C34865%2C1731746089868.1731746134186 2024-11-16T08:36:00,473 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746145004 to hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/oldWALs/c27dd56784bd%2C34865%2C1731746089868.1731746145004 2024-11-16T08:36:00,475 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/WALs/c27dd56784bd,34865,1731746089868/c27dd56784bd%2C34865%2C1731746089868.1731746150029 to hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/oldWALs/c27dd56784bd%2C34865%2C1731746089868.1731746150029 2024-11-16T08:36:12,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34865 {}] regionserver.HRegion(8855): Flush requested on f45a6132913f1e9ffa1461d6d2bb317f 2024-11-16T08:36:12,101 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f45a6132913f1e9ffa1461d6d2bb317f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T08:36:12,109 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/.tmp/info/91187eb52aa040aeafd2f013bb2a9341 is 1080, key is row0022/info:/1731746160068/Put/seqid=0 2024-11-16T08:36:12,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741848_1024 (size=12509) 2024-11-16T08:36:12,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741848_1024 (size=12509) 2024-11-16T08:36:12,117 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/.tmp/info/91187eb52aa040aeafd2f013bb2a9341 2024-11-16T08:36:12,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/.tmp/info/91187eb52aa040aeafd2f013bb2a9341 as hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/91187eb52aa040aeafd2f013bb2a9341 2024-11-16T08:36:12,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/91187eb52aa040aeafd2f013bb2a9341, entries=7, sequenceid=42, filesize=12.2 K 2024-11-16T08:36:12,145 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for f45a6132913f1e9ffa1461d6d2bb317f in 43ms, sequenceid=42, compaction requested=false 2024-11-16T08:36:12,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f45a6132913f1e9ffa1461d6d2bb317f: 2024-11-16T08:36:12,145 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-16T08:36:12,145 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:36:12,145 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/a35669ecbffc440e89fac197a459ffa1 because midkey is the same as first or last row 2024-11-16T08:36:18,200 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T08:36:20,118 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T08:36:20,118 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T08:36:20,118 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:36:20,123 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:36:20,124 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:36:20,124 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
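The call stack above shows the teardown path: AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which closes the async connection and stops the mini cluster. A typical JUnit teardown producing this sequence looks roughly like the sketch below; the TEST_UTIL field name is an assumption, while the utility class and method match the stack trace.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

public class TeardownSketch {
  // Assumed to have been used earlier to start the mini cluster.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Stops the HBase mini cluster (master + region server) and the backing mini DFS,
    // producing the shutdown and connection-close entries seen in the log.
    TEST_UTIL.shutdownMiniCluster();
  }
}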
2024-11-16T08:36:20,124 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T08:36:20,124 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=368815740, stopped=false 2024-11-16T08:36:20,124 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c27dd56784bd,46533,1731746089076 2024-11-16T08:36:20,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T08:36:20,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34865-0x10142c840e70001, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T08:36:20,188 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T08:36:20,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:20,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34865-0x10142c840e70001, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:20,188 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T08:36:20,188 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:36:20,188 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:36:20,189 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34865-0x10142c840e70001, quorum=127.0.0.1:62441, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:36:20,189 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:36:20,189 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c27dd56784bd,34865,1731746089868' ***** 2024-11-16T08:36:20,189 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T08:36:20,189 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T08:36:20,189 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T08:36:20,189 INFO [RS:0;c27dd56784bd:34865 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T08:36:20,190 INFO [RS:0;c27dd56784bd:34865 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T08:36:20,190 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(3091): Received CLOSE for f45a6132913f1e9ffa1461d6d2bb317f 2024-11-16T08:36:20,190 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(959): stopping server c27dd56784bd,34865,1731746089868 2024-11-16T08:36:20,190 INFO [RS:0;c27dd56784bd:34865 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T08:36:20,190 INFO [RS:0;c27dd56784bd:34865 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c27dd56784bd:34865. 
2024-11-16T08:36:20,190 DEBUG [RS:0;c27dd56784bd:34865 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:36:20,190 DEBUG [RS:0;c27dd56784bd:34865 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:36:20,191 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f45a6132913f1e9ffa1461d6d2bb317f, disabling compactions & flushes 2024-11-16T08:36:20,191 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T08:36:20,191 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f. 2024-11-16T08:36:20,191 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T08:36:20,191 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f. 2024-11-16T08:36:20,191 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T08:36:20,191 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f. after waiting 0 ms 2024-11-16T08:36:20,191 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f. 
2024-11-16T08:36:20,191 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T08:36:20,191 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing f45a6132913f1e9ffa1461d6d2bb317f 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-16T08:36:20,191 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T08:36:20,191 DEBUG [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, f45a6132913f1e9ffa1461d6d2bb317f=TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f.} 2024-11-16T08:36:20,191 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T08:36:20,191 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T08:36:20,191 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T08:36:20,192 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T08:36:20,192 DEBUG [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, f45a6132913f1e9ffa1461d6d2bb317f 2024-11-16T08:36:20,192 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T08:36:20,192 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-16T08:36:20,197 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/.tmp/info/34b62c3f56924a5abc11cae62db9bbfe is 1080, key is row0029/info:/1731746174105/Put/seqid=0 2024-11-16T08:36:20,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741849_1025 (size=8193) 2024-11-16T08:36:20,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741849_1025 (size=8193) 2024-11-16T08:36:20,214 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740/.tmp/info/e9ebd9df77364cb89faefb2a563ced23 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f./info:regioninfo/1731746092969/Put/seqid=0 2024-11-16T08:36:20,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741850_1026 (size=7016) 2024-11-16T08:36:20,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:44245 is added to blk_1073741850_1026 (size=7016) 2024-11-16T08:36:20,223 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740/.tmp/info/e9ebd9df77364cb89faefb2a563ced23 2024-11-16T08:36:20,230 INFO [regionserver/c27dd56784bd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T08:36:20,230 INFO [regionserver/c27dd56784bd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T08:36:20,247 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740/.tmp/ns/714431b4b1494cce8389f019f5e2c6d7 is 43, key is default/ns:d/1731746092198/Put/seqid=0 2024-11-16T08:36:20,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741851_1027 (size=5153) 2024-11-16T08:36:20,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741851_1027 (size=5153) 2024-11-16T08:36:20,254 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740/.tmp/ns/714431b4b1494cce8389f019f5e2c6d7 2024-11-16T08:36:20,278 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740/.tmp/table/de76ada724d141119273c62fea3c9ecc is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731746092990/Put/seqid=0 2024-11-16T08:36:20,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741852_1028 (size=5396) 2024-11-16T08:36:20,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741852_1028 (size=5396) 2024-11-16T08:36:20,286 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740/.tmp/table/de76ada724d141119273c62fea3c9ecc 2024-11-16T08:36:20,295 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740/.tmp/info/e9ebd9df77364cb89faefb2a563ced23 as hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740/info/e9ebd9df77364cb89faefb2a563ced23 2024-11-16T08:36:20,304 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740/info/e9ebd9df77364cb89faefb2a563ced23, entries=10, sequenceid=11, filesize=6.9 K 2024-11-16T08:36:20,306 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740/.tmp/ns/714431b4b1494cce8389f019f5e2c6d7 as hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740/ns/714431b4b1494cce8389f019f5e2c6d7 2024-11-16T08:36:20,315 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740/ns/714431b4b1494cce8389f019f5e2c6d7, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T08:36:20,317 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740/.tmp/table/de76ada724d141119273c62fea3c9ecc as hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740/table/de76ada724d141119273c62fea3c9ecc 2024-11-16T08:36:20,327 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740/table/de76ada724d141119273c62fea3c9ecc, entries=2, sequenceid=11, filesize=5.3 K 2024-11-16T08:36:20,328 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 136ms, sequenceid=11, compaction requested=false 2024-11-16T08:36:20,335 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T08:36:20,337 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T08:36:20,338 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T08:36:20,338 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731746180191Running coprocessor pre-close hooks at 1731746180191Disabling compacts and flushes for region at 1731746180191Disabling writes for close at 1731746180192 (+1 ms)Obtaining lock to block concurrent updates at 1731746180192Preparing flush snapshotting stores in 1588230740 at 1731746180192Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731746180192Flushing stores of hbase:meta,,1.1588230740 at 1731746180193 (+1 ms)Flushing 1588230740/info: creating writer at 
1731746180193Flushing 1588230740/info: appending metadata at 1731746180214 (+21 ms)Flushing 1588230740/info: closing flushed file at 1731746180214Flushing 1588230740/ns: creating writer at 1731746180231 (+17 ms)Flushing 1588230740/ns: appending metadata at 1731746180246 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731746180246Flushing 1588230740/table: creating writer at 1731746180262 (+16 ms)Flushing 1588230740/table: appending metadata at 1731746180277 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731746180277Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7de7cf8e: reopening flushed file at 1731746180294 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d57e0db: reopening flushed file at 1731746180305 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3fa2207a: reopening flushed file at 1731746180316 (+11 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 136ms, sequenceid=11, compaction requested=false at 1731746180329 (+13 ms)Writing region close event to WAL at 1731746180330 (+1 ms)Running coprocessor post-close hooks at 1731746180336 (+6 ms)Closed at 1731746180337 (+1 ms) 2024-11-16T08:36:20,338 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T08:36:20,392 DEBUG [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(1351): Waiting on f45a6132913f1e9ffa1461d6d2bb317f 2024-11-16T08:36:20,592 DEBUG [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(1351): Waiting on f45a6132913f1e9ffa1461d6d2bb317f 2024-11-16T08:36:20,609 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/.tmp/info/34b62c3f56924a5abc11cae62db9bbfe 2024-11-16T08:36:20,623 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/.tmp/info/34b62c3f56924a5abc11cae62db9bbfe as hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/34b62c3f56924a5abc11cae62db9bbfe 2024-11-16T08:36:20,632 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/34b62c3f56924a5abc11cae62db9bbfe, entries=3, sequenceid=48, filesize=8.0 K 2024-11-16T08:36:20,634 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for f45a6132913f1e9ffa1461d6d2bb317f in 443ms, sequenceid=48, compaction requested=true 2024-11-16T08:36:20,635 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f.-1 {}] 
regionserver.HStore(2317): Moving the files [hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/59ac1ec19a9e4a1b8f7aaccf085cddec, hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/09a104b4bd034f4b88126e0fd8e6ce02, hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/d92391005fb64ac6ae8de023a22905f0] to archive 2024-11-16T08:36:20,638 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-16T08:36:20,640 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/59ac1ec19a9e4a1b8f7aaccf085cddec to hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/archive/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/59ac1ec19a9e4a1b8f7aaccf085cddec 2024-11-16T08:36:20,642 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/09a104b4bd034f4b88126e0fd8e6ce02 to hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/archive/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/09a104b4bd034f4b88126e0fd8e6ce02 2024-11-16T08:36:20,644 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/d92391005fb64ac6ae8de023a22905f0 to hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/archive/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/info/d92391005fb64ac6ae8de023a22905f0 2024-11-16T08:36:20,653 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c27dd56784bd:46533 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] 
at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-16T08:36:20,654 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [59ac1ec19a9e4a1b8f7aaccf085cddec=12509, 09a104b4bd034f4b88126e0fd8e6ce02=12509, d92391005fb64ac6ae8de023a22905f0=12509] 2024-11-16T08:36:20,660 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/data/default/TestLogRolling-testSlowSyncLogRolling/f45a6132913f1e9ffa1461d6d2bb317f/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-16T08:36:20,661 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f. 2024-11-16T08:36:20,661 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f45a6132913f1e9ffa1461d6d2bb317f: Waiting for close lock at 1731746180190Running coprocessor pre-close hooks at 1731746180191 (+1 ms)Disabling compacts and flushes for region at 1731746180191Disabling writes for close at 1731746180191Obtaining lock to block concurrent updates at 1731746180191Preparing flush snapshotting stores in f45a6132913f1e9ffa1461d6d2bb317f at 1731746180191Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731746180191Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f. at 1731746180192 (+1 ms)Flushing f45a6132913f1e9ffa1461d6d2bb317f/info: creating writer at 1731746180192Flushing f45a6132913f1e9ffa1461d6d2bb317f/info: appending metadata at 1731746180196 (+4 ms)Flushing f45a6132913f1e9ffa1461d6d2bb317f/info: closing flushed file at 1731746180196Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@54eb81d8: reopening flushed file at 1731746180622 (+426 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for f45a6132913f1e9ffa1461d6d2bb317f in 443ms, sequenceid=48, compaction requested=true at 1731746180634 (+12 ms)Writing region close event to WAL at 1731746180655 (+21 ms)Running coprocessor post-close hooks at 1731746180661 (+6 ms)Closed at 1731746180661 2024-11-16T08:36:20,662 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731746092467.f45a6132913f1e9ffa1461d6d2bb317f. 2024-11-16T08:36:20,793 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(976): stopping server c27dd56784bd,34865,1731746089868; all regions closed. 
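The flush sequence above follows a write-then-commit pattern: the new HFile is first written under the region's .tmp directory (the HFileWriterImpl / DefaultStoreFlusher lines), and only then do the "Committing ... as ..." lines rename it into the store directory before HStore reports it as added. Below is a minimal sketch of that pattern using the plain Hadoop FileSystem API, with made-up local paths rather than the test's HDFS layout.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommitSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();   // picks up fs.defaultFS (local FS by default)
        FileSystem fs = FileSystem.get(conf);

        // Illustrative paths only; the real layout is <region>/.tmp/<family>/<file>.
        Path tmp = new Path("/tmp/tmp-then-commit-demo/.tmp/info/flushedfile");
        Path dst = new Path("/tmp/tmp-then-commit-demo/info/flushedfile");

        fs.mkdirs(tmp.getParent());
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write("cells...".getBytes(StandardCharsets.UTF_8));
        }                                           // the file is fully written first
        fs.mkdirs(dst.getParent());
        boolean committed = fs.rename(tmp, dst);    // then committed with a single rename
        System.out.println("committed=" + committed);
    }
}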
2024-11-16T08:36:20,796 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:20,797 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:20,797 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:20,798 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:20,798 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:20,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741834_1010 (size=3066) 2024-11-16T08:36:20,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741834_1010 (size=3066) 2024-11-16T08:36:20,811 DEBUG [RS:0;c27dd56784bd:34865 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/oldWALs 2024-11-16T08:36:20,811 INFO [RS:0;c27dd56784bd:34865 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c27dd56784bd%2C34865%2C1731746089868.meta:.meta(num 1731746091943) 2024-11-16T08:36:20,812 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:20,812 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:20,812 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:20,812 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:20,813 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:20,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741846_1022 (size=13040) 2024-11-16T08:36:20,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741846_1022 (size=13040) 2024-11-16T08:36:20,818 DEBUG [RS:0;c27dd56784bd:34865 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/oldWALs 2024-11-16T08:36:20,819 INFO [RS:0;c27dd56784bd:34865 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c27dd56784bd%2C34865%2C1731746089868:(num 1731746160066) 2024-11-16T08:36:20,819 DEBUG [RS:0;c27dd56784bd:34865 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:36:20,819 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T08:36:20,819 INFO [RS:0;c27dd56784bd:34865 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T08:36:20,819 INFO [RS:0;c27dd56784bd:34865 {}] hbase.ChoreService(370): Chore service for: regionserver/c27dd56784bd:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-16T08:36:20,819 INFO [RS:0;c27dd56784bd:34865 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T08:36:20,819 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
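The ChoreService entry above lists the periodic chores, with their periods in milliseconds, that were still registered when the region server shut down. A rough stand-in for such a chore runner, built on a plain ScheduledExecutorService rather than HBase's ChoreService class, looks like this:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreRunnerSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        // Period comparable to the 60000 ms CompactionThroughputTuner chore above.
        pool.scheduleAtFixedRate(
            () -> System.out.println("tuner chore tick"),
            0, 60_000, TimeUnit.MILLISECONDS);

        Thread.sleep(100);       // let the first tick run
        pool.shutdownNow();      // interrupts workers and drops pending runs, like chore shutdown
        pool.awaitTermination(5, TimeUnit.SECONDS);
        System.out.println("chore pool stopped");
    }
}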
2024-11-16T08:36:20,820 INFO [RS:0;c27dd56784bd:34865 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34865 2024-11-16T08:36:20,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34865-0x10142c840e70001, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c27dd56784bd,34865,1731746089868 2024-11-16T08:36:20,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T08:36:20,836 INFO [RS:0;c27dd56784bd:34865 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T08:36:20,847 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c27dd56784bd,34865,1731746089868] 2024-11-16T08:36:20,857 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c27dd56784bd,34865,1731746089868 already deleted, retry=false 2024-11-16T08:36:20,858 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c27dd56784bd,34865,1731746089868 expired; onlineServers=0 2024-11-16T08:36:20,858 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c27dd56784bd,46533,1731746089076' ***** 2024-11-16T08:36:20,858 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T08:36:20,858 INFO [M:0;c27dd56784bd:46533 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T08:36:20,859 INFO [M:0;c27dd56784bd:46533 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T08:36:20,859 DEBUG [M:0;c27dd56784bd:46533 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T08:36:20,859 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
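The sequence above is ZooKeeper-driven: deleting the region server's ephemeral znode under /hbase/rs fires the NodeDeleted/NodeChildrenChanged events, and the master's RegionServerTracker reacts by processing the expiration. A hypothetical sketch of tracking such ephemeral children with the plain ZooKeeper client follows (placeholder quorum address; this is not the RegionServerTracker source):

import java.util.HashSet;
import java.util.Set;
import org.apache.zookeeper.Watcher.Event.EventType;
import org.apache.zookeeper.ZooKeeper;

public class RsTrackerSketch {
    public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, e -> { });
        Set<String> known = new HashSet<>(zk.getChildren("/hbase/rs", null));

        zk.getChildren("/hbase/rs", event -> {
            if (event.getType() == EventType.NodeChildrenChanged) {
                try {
                    Set<String> now = new HashSet<>(zk.getChildren("/hbase/rs", null));
                    Set<String> gone = new HashSet<>(known);
                    gone.removeAll(now);
                    // A vanished child means that server's ephemeral node was deleted or expired.
                    gone.forEach(rs -> System.out.println("processing expiration of " + rs));
                } catch (Exception ex) {
                    ex.printStackTrace();
                }
            }
        });

        Thread.sleep(10_000);   // keep the session alive long enough to observe an event
        zk.close();
    }
}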
2024-11-16T08:36:20,859 DEBUG [M:0;c27dd56784bd:46533 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T08:36:20,859 DEBUG [master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746090986 {}] cleaner.HFileCleaner(306): Exit Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746090986,5,FailOnTimeoutGroup] 2024-11-16T08:36:20,859 DEBUG [master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746090988 {}] cleaner.HFileCleaner(306): Exit Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746090988,5,FailOnTimeoutGroup] 2024-11-16T08:36:20,860 INFO [M:0;c27dd56784bd:46533 {}] hbase.ChoreService(370): Chore service for: master/c27dd56784bd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T08:36:20,860 INFO [M:0;c27dd56784bd:46533 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T08:36:20,860 DEBUG [M:0;c27dd56784bd:46533 {}] master.HMaster(1795): Stopping service threads 2024-11-16T08:36:20,860 INFO [M:0;c27dd56784bd:46533 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T08:36:20,860 INFO [M:0;c27dd56784bd:46533 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T08:36:20,861 INFO [M:0;c27dd56784bd:46533 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T08:36:20,861 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T08:36:20,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T08:36:20,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:20,872 DEBUG [M:0;c27dd56784bd:46533 {}] zookeeper.ZKUtil(347): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T08:36:20,872 WARN [M:0;c27dd56784bd:46533 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T08:36:20,873 INFO [M:0;c27dd56784bd:46533 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/.lastflushedseqids 2024-11-16T08:36:20,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741853_1029 (size=130) 2024-11-16T08:36:20,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741853_1029 (size=130) 2024-11-16T08:36:20,886 INFO [M:0;c27dd56784bd:46533 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T08:36:20,886 INFO [M:0;c27dd56784bd:46533 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T08:36:20,886 DEBUG [M:0;c27dd56784bd:46533 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T08:36:20,887 INFO [M:0;c27dd56784bd:46533 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:36:20,887 DEBUG [M:0;c27dd56784bd:46533 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:36:20,887 DEBUG [M:0;c27dd56784bd:46533 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T08:36:20,887 DEBUG [M:0;c27dd56784bd:46533 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:36:20,887 INFO [M:0;c27dd56784bd:46533 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.01 KB heapSize=29.18 KB 2024-11-16T08:36:20,904 DEBUG [M:0;c27dd56784bd:46533 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b158404683104ab6bd8d6122f3a6e4ce is 82, key is hbase:meta,,1/info:regioninfo/1731746092028/Put/seqid=0 2024-11-16T08:36:20,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741854_1030 (size=5672) 2024-11-16T08:36:20,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741854_1030 (size=5672) 2024-11-16T08:36:20,910 INFO [M:0;c27dd56784bd:46533 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b158404683104ab6bd8d6122f3a6e4ce 2024-11-16T08:36:20,931 DEBUG [M:0;c27dd56784bd:46533 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/006a083601a74075858e0dde785bc9c7 is 765, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731746093001/Put/seqid=0 2024-11-16T08:36:20,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741855_1031 (size=6246) 2024-11-16T08:36:20,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741855_1031 (size=6246) 2024-11-16T08:36:20,938 INFO [M:0;c27dd56784bd:46533 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.41 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/006a083601a74075858e0dde785bc9c7 2024-11-16T08:36:20,943 INFO [M:0;c27dd56784bd:46533 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 006a083601a74075858e0dde785bc9c7 2024-11-16T08:36:20,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34865-0x10142c840e70001, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:36:20,947 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34865-0x10142c840e70001, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:36:20,948 INFO [RS:0;c27dd56784bd:34865 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T08:36:20,948 INFO [RS:0;c27dd56784bd:34865 {}] regionserver.HRegionServer(1031): Exiting; stopping=c27dd56784bd,34865,1731746089868; zookeeper connection closed. 2024-11-16T08:36:20,948 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@184b911e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@184b911e 2024-11-16T08:36:20,949 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T08:36:20,960 DEBUG [M:0;c27dd56784bd:46533 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2dba4be5c0dd4cc0aeda9acc9db74308 is 69, key is c27dd56784bd,34865,1731746089868/rs:state/1731746091142/Put/seqid=0 2024-11-16T08:36:20,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741856_1032 (size=5156) 2024-11-16T08:36:20,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741856_1032 (size=5156) 2024-11-16T08:36:20,966 INFO [M:0;c27dd56784bd:46533 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2dba4be5c0dd4cc0aeda9acc9db74308 2024-11-16T08:36:20,987 DEBUG [M:0;c27dd56784bd:46533 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/faea73bf607b47a2ab12138de01e9e84 is 52, key is load_balancer_on/state:d/1731746092438/Put/seqid=0 2024-11-16T08:36:20,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741857_1033 (size=5056) 2024-11-16T08:36:20,995 INFO [M:0;c27dd56784bd:46533 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/faea73bf607b47a2ab12138de01e9e84 2024-11-16T08:36:20,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741857_1033 (size=5056) 2024-11-16T08:36:21,008 DEBUG [M:0;c27dd56784bd:46533 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b158404683104ab6bd8d6122f3a6e4ce as hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b158404683104ab6bd8d6122f3a6e4ce 2024-11-16T08:36:21,015 INFO [M:0;c27dd56784bd:46533 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b158404683104ab6bd8d6122f3a6e4ce, entries=8, sequenceid=59, filesize=5.5 K 2024-11-16T08:36:21,016 DEBUG [M:0;c27dd56784bd:46533 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/006a083601a74075858e0dde785bc9c7 as hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/006a083601a74075858e0dde785bc9c7 2024-11-16T08:36:21,023 INFO [M:0;c27dd56784bd:46533 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 006a083601a74075858e0dde785bc9c7 2024-11-16T08:36:21,024 INFO [M:0;c27dd56784bd:46533 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/006a083601a74075858e0dde785bc9c7, entries=6, sequenceid=59, filesize=6.1 K 2024-11-16T08:36:21,025 DEBUG [M:0;c27dd56784bd:46533 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2dba4be5c0dd4cc0aeda9acc9db74308 as hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2dba4be5c0dd4cc0aeda9acc9db74308 2024-11-16T08:36:21,032 INFO [M:0;c27dd56784bd:46533 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2dba4be5c0dd4cc0aeda9acc9db74308, entries=1, sequenceid=59, filesize=5.0 K 2024-11-16T08:36:21,033 DEBUG [M:0;c27dd56784bd:46533 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/faea73bf607b47a2ab12138de01e9e84 as hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/faea73bf607b47a2ab12138de01e9e84 2024-11-16T08:36:21,040 INFO [M:0;c27dd56784bd:46533 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/faea73bf607b47a2ab12138de01e9e84, entries=1, sequenceid=59, filesize=4.9 K 2024-11-16T08:36:21,041 INFO [M:0;c27dd56784bd:46533 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 154ms, sequenceid=59, compaction requested=false 2024-11-16T08:36:21,043 INFO [M:0;c27dd56784bd:46533 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
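The keys printed while the master's local store flushes, such as load_balancer_on/state:d and c27dd56784bd,34865,.../rs:state, are ordinary HBase cells addressed by row, column family, and qualifier. The small illustration below shows that cell shape with the public client API; it is purely illustrative, since the master region is written internally rather than through Table puts.

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreCellShape {
    public static void main(String[] args) {
        Put put = new Put(Bytes.toBytes("load_balancer_on"));
        // family "state", qualifier "d", boolean payload
        put.addColumn(Bytes.toBytes("state"), Bytes.toBytes("d"), Bytes.toBytes(true));
        System.out.println(put);   // prints the row plus the families/qualifiers it touches
    }
}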
2024-11-16T08:36:21,043 DEBUG [M:0;c27dd56784bd:46533 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731746180886Disabling compacts and flushes for region at 1731746180886Disabling writes for close at 1731746180887 (+1 ms)Obtaining lock to block concurrent updates at 1731746180887Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731746180887Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23564, getHeapSize=29816, getOffHeapSize=0, getCellsCount=70 at 1731746180887Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731746180888 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731746180888Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731746180904 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731746180904Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731746180916 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731746180930 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731746180930Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731746180944 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731746180960 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731746180960Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731746180972 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731746180986 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731746180986Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65cb495e: reopening flushed file at 1731746181007 (+21 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14375f1: reopening flushed file at 1731746181015 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d6398e4: reopening flushed file at 1731746181024 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e0e6797: reopening flushed file at 1731746181032 (+8 ms)Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 154ms, sequenceid=59, compaction requested=false at 1731746181041 (+9 ms)Writing region close event to WAL at 1731746181043 (+2 ms)Closed at 1731746181043 2024-11-16T08:36:21,044 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:21,044 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:21,044 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:21,044 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:21,044 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:21,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44245 is added to blk_1073741830_1006 (size=27961) 2024-11-16T08:36:21,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44307 is added to blk_1073741830_1006 (size=27961) 2024-11-16T08:36:21,048 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
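The "Region close journal" entries above are a timeline: each step carries an absolute timestamp plus the delta from the previous step, e.g. "Writing region close event to WAL at 1731746181043 (+2 ms)". A tiny sketch that produces output of the same shape:

import java.util.ArrayList;
import java.util.List;

public class CloseJournalSketch {
    private record Step(String what, long at) { }
    private final List<Step> steps = new ArrayList<>();

    void record(String what) { steps.add(new Step(what, System.currentTimeMillis())); }

    @Override public String toString() {
        StringBuilder sb = new StringBuilder();
        long prev = -1;
        for (Step s : steps) {
            sb.append(s.what()).append(" at ").append(s.at());
            if (prev >= 0 && s.at() > prev) sb.append(" (+").append(s.at() - prev).append(" ms)");
            sb.append('\n');
            prev = s.at();
        }
        return sb.toString();
    }

    public static void main(String[] args) throws InterruptedException {
        CloseJournalSketch journal = new CloseJournalSketch();
        journal.record("Waiting for close lock");
        Thread.sleep(5);
        journal.record("Disabling writes for close");
        journal.record("Writing region close event to WAL");
        System.out.println(journal);
    }
}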
2024-11-16T08:36:21,048 INFO [M:0;c27dd56784bd:46533 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T08:36:21,048 INFO [M:0;c27dd56784bd:46533 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:46533 2024-11-16T08:36:21,048 INFO [M:0;c27dd56784bd:46533 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T08:36:21,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:36:21,158 INFO [M:0;c27dd56784bd:46533 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T08:36:21,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46533-0x10142c840e70000, quorum=127.0.0.1:62441, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:36:21,164 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1ca1952e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:36:21,167 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75639b0e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:36:21,167 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:36:21,167 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:36:21,168 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ec7bf2e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/hadoop.log.dir/,STOPPED} 2024-11-16T08:36:21,170 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
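The "Stopped ServerConnector..." and "Stopped o.e.j..." lines come from the embedded Jetty servers backing the mini HDFS web UIs being shut down during teardown. As a hedged illustration of that lifecycle only (not the test's own code), an embedded Jetty server bound to an ephemeral port, as in the "{localhost:0}" connectors above, is started and stopped like this:

import org.eclipse.jetty.server.Server;

public class EmbeddedJettyStopSketch {
    public static void main(String[] args) throws Exception {
        Server server = new Server(0);   // 0 = pick an ephemeral port, like "localhost:0" above
        server.start();
        System.out.println("started on " + server.getURI());
        server.stop();                   // Jetty itself emits the "Stopped ServerConnector..." lines
        server.join();
        System.out.println("stopped");
    }
}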
2024-11-16T08:36:21,170 WARN [BP-712895127-172.17.0.3-1731746083643 heartbeating to localhost/127.0.0.1:46127 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T08:36:21,171 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T08:36:21,171 WARN [BP-712895127-172.17.0.3-1731746083643 heartbeating to localhost/127.0.0.1:46127 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-712895127-172.17.0.3-1731746083643 (Datanode Uuid e6418a5c-e3e7-4118-9ed9-a67e8748a7bc) service to localhost/127.0.0.1:46127 2024-11-16T08:36:21,172 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/cluster_96f5ee98-0442-5dac-b1be-cdff31b13ecc/data/data3/current/BP-712895127-172.17.0.3-1731746083643 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:36:21,173 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/cluster_96f5ee98-0442-5dac-b1be-cdff31b13ecc/data/data4/current/BP-712895127-172.17.0.3-1731746083643 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:36:21,173 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T08:36:21,181 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7ca8488f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:36:21,181 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@dc1ca4f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:36:21,181 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:36:21,181 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:36:21,181 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60d13ec7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/hadoop.log.dir/,STOPPED} 2024-11-16T08:36:21,183 WARN [BP-712895127-172.17.0.3-1731746083643 heartbeating to localhost/127.0.0.1:46127 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T08:36:21,183 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
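The WARN lines above show the usual teardown idiom for background helpers such as the disk-usage refresher: the thread sleeps between rounds, and an interrupt during that sleep is taken as the shutdown signal, logged, and followed by a clean exit. A generic sketch of that idiom:

public class RefreshThreadSketch {
    public static void main(String[] args) throws InterruptedException {
        Thread refresher = new Thread(() -> {
            while (!Thread.currentThread().isInterrupted()) {
                try {
                    // pretend to refresh disk usage, then sleep until the next round
                    Thread.sleep(600_000);
                } catch (InterruptedException e) {
                    System.out.println("Thread Interrupted waiting to refresh disk information");
                    return;   // exit the loop instead of retrying
                }
            }
        }, "refreshUsed-sketch");
        refresher.start();
        Thread.sleep(100);
        refresher.interrupt();   // teardown interrupts the thread, as in the WARN lines above
        refresher.join();
    }
}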
2024-11-16T08:36:21,183 WARN [BP-712895127-172.17.0.3-1731746083643 heartbeating to localhost/127.0.0.1:46127 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-712895127-172.17.0.3-1731746083643 (Datanode Uuid 66151ea6-5765-4166-afe4-4071cd671cb6) service to localhost/127.0.0.1:46127 2024-11-16T08:36:21,183 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T08:36:21,184 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/cluster_96f5ee98-0442-5dac-b1be-cdff31b13ecc/data/data1/current/BP-712895127-172.17.0.3-1731746083643 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:36:21,184 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/cluster_96f5ee98-0442-5dac-b1be-cdff31b13ecc/data/data2/current/BP-712895127-172.17.0.3-1731746083643 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:36:21,184 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T08:36:21,192 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T08:36:21,193 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:36:21,193 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:36:21,193 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:36:21,193 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/hadoop.log.dir/,STOPPED} 2024-11-16T08:36:21,203 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T08:36:21,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T08:36:21,237 INFO [regionserver/c27dd56784bd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T08:36:21,242 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=81 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:46127 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:46127 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46127 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@5e1d09c6 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46127 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: master/c27dd56784bd:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/c27dd56784bd:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:46127 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: regionserver/c27dd56784bd:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46127 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:46127 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: regionserver/c27dd56784bd:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: 
nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46127 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) 
app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=400 (was 287) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=242 (was 339), ProcessCount=11 (was 11), AvailableMemoryMB=2432 (was 3042) 2024-11-16T08:36:21,248 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=81, OpenFileDescriptor=400, MaxFileDescriptor=1048576, SystemLoadAverage=242, ProcessCount=11, AvailableMemoryMB=2432 2024-11-16T08:36:21,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T08:36:21,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/hadoop.log.dir so I do NOT create it in target/test-data/f1490217-89f5-508b-0279-d41bd0326215 2024-11-16T08:36:21,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d7ea962a-5163-7099-53cd-c8c5bf73cdba/hadoop.tmp.dir so I do NOT create it in target/test-data/f1490217-89f5-508b-0279-d41bd0326215 2024-11-16T08:36:21,249 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/cluster_32ee417c-0dfe-af80-26d2-5b9974ad3a0f, deleteOnExit=true 2024-11-16T08:36:21,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T08:36:21,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/test.cache.data in system properties and HBase conf 2024-11-16T08:36:21,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T08:36:21,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/hadoop.log.dir in system properties and HBase conf 2024-11-16T08:36:21,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T08:36:21,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T08:36:21,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T08:36:21,249 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-16T08:36:21,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T08:36:21,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T08:36:21,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T08:36:21,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T08:36:21,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T08:36:21,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T08:36:21,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T08:36:21,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T08:36:21,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T08:36:21,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/nfs.dump.dir in system properties and HBase conf 2024-11-16T08:36:21,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/java.io.tmpdir in system properties and HBase conf 2024-11-16T08:36:21,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T08:36:21,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T08:36:21,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T08:36:21,263 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T08:36:21,629 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:36:21,635 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:36:21,637 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:36:21,637 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:36:21,637 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T08:36:21,638 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:36:21,638 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a15ed6a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:36:21,639 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2152d149{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:36:21,735 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3fa5684d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/java.io.tmpdir/jetty-localhost-44499-hadoop-hdfs-3_4_1-tests_jar-_-any-11884112044223831380/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T08:36:21,735 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7bd218c7{HTTP/1.1, (http/1.1)}{localhost:44499} 2024-11-16T08:36:21,736 INFO [Time-limited test {}] server.Server(415): Started @99971ms 2024-11-16T08:36:21,747 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T08:36:21,992 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:36:21,996 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:36:21,997 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:36:21,997 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:36:21,997 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T08:36:21,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1aa9c156{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:36:21,998 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e7873b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:36:22,093 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2cd60cfb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/java.io.tmpdir/jetty-localhost-43109-hadoop-hdfs-3_4_1-tests_jar-_-any-17299323726408942544/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:36:22,094 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4f88e14b{HTTP/1.1, (http/1.1)}{localhost:43109} 2024-11-16T08:36:22,094 INFO [Time-limited test {}] server.Server(415): Started @100330ms 2024-11-16T08:36:22,095 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:36:22,128 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:36:22,132 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:36:22,132 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:36:22,132 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:36:22,133 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T08:36:22,133 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d5e070a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:36:22,133 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@573af0f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:36:22,228 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b5e52bc{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/java.io.tmpdir/jetty-localhost-41755-hadoop-hdfs-3_4_1-tests_jar-_-any-16534609745226094420/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:36:22,229 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@882842c{HTTP/1.1, (http/1.1)}{localhost:41755} 2024-11-16T08:36:22,229 INFO [Time-limited test {}] server.Server(415): Started @100465ms 2024-11-16T08:36:22,230 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:36:23,273 WARN [Thread-445 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/cluster_32ee417c-0dfe-af80-26d2-5b9974ad3a0f/data/data1/current/BP-1465217183-172.17.0.3-1731746181275/current, will proceed with Du for space computation calculation, 2024-11-16T08:36:23,273 WARN [Thread-446 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/cluster_32ee417c-0dfe-af80-26d2-5b9974ad3a0f/data/data2/current/BP-1465217183-172.17.0.3-1731746181275/current, will proceed with Du for space computation calculation, 2024-11-16T08:36:23,292 WARN [Thread-409 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T08:36:23,295 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x528d307a4d8ce094 with lease ID 0x391bdc0e3e5be9f7: Processing first storage report for DS-0eee16cc-8319-466c-b16d-a26e2cc606d5 from datanode DatanodeRegistration(127.0.0.1:38815, datanodeUuid=0bcc3664-6ae7-43e4-baa8-ea12cc636143, infoPort=40781, infoSecurePort=0, ipcPort=33115, storageInfo=lv=-57;cid=testClusterID;nsid=2081185996;c=1731746181275) 2024-11-16T08:36:23,295 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x528d307a4d8ce094 with lease ID 0x391bdc0e3e5be9f7: from storage DS-0eee16cc-8319-466c-b16d-a26e2cc606d5 node DatanodeRegistration(127.0.0.1:38815, datanodeUuid=0bcc3664-6ae7-43e4-baa8-ea12cc636143, infoPort=40781, infoSecurePort=0, ipcPort=33115, storageInfo=lv=-57;cid=testClusterID;nsid=2081185996;c=1731746181275), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T08:36:23,295 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x528d307a4d8ce094 with lease ID 0x391bdc0e3e5be9f7: Processing first storage report for DS-3d051345-7ab3-4ab7-a96c-a8362e5e601a from datanode DatanodeRegistration(127.0.0.1:38815, datanodeUuid=0bcc3664-6ae7-43e4-baa8-ea12cc636143, infoPort=40781, infoSecurePort=0, ipcPort=33115, storageInfo=lv=-57;cid=testClusterID;nsid=2081185996;c=1731746181275) 2024-11-16T08:36:23,295 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x528d307a4d8ce094 with lease ID 0x391bdc0e3e5be9f7: from storage DS-3d051345-7ab3-4ab7-a96c-a8362e5e601a node DatanodeRegistration(127.0.0.1:38815, datanodeUuid=0bcc3664-6ae7-43e4-baa8-ea12cc636143, infoPort=40781, infoSecurePort=0, ipcPort=33115, storageInfo=lv=-57;cid=testClusterID;nsid=2081185996;c=1731746181275), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:36:23,411 WARN [Thread-457 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/cluster_32ee417c-0dfe-af80-26d2-5b9974ad3a0f/data/data4/current/BP-1465217183-172.17.0.3-1731746181275/current, will proceed with Du for space computation calculation, 2024-11-16T08:36:23,411 WARN [Thread-456 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/cluster_32ee417c-0dfe-af80-26d2-5b9974ad3a0f/data/data3/current/BP-1465217183-172.17.0.3-1731746181275/current, will proceed with Du for space computation calculation, 2024-11-16T08:36:23,429 WARN [Thread-432 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T08:36:23,431 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2b93f983684cbee5 with lease ID 0x391bdc0e3e5be9f8: Processing first storage report for DS-26b7f491-b525-44a0-a502-9ba472708fc4 from datanode DatanodeRegistration(127.0.0.1:40607, datanodeUuid=48b837a4-0a92-43bc-9fc8-357ad1fbdf4d, infoPort=39889, infoSecurePort=0, ipcPort=46375, storageInfo=lv=-57;cid=testClusterID;nsid=2081185996;c=1731746181275) 2024-11-16T08:36:23,431 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2b93f983684cbee5 with lease ID 0x391bdc0e3e5be9f8: from storage DS-26b7f491-b525-44a0-a502-9ba472708fc4 node DatanodeRegistration(127.0.0.1:40607, datanodeUuid=48b837a4-0a92-43bc-9fc8-357ad1fbdf4d, infoPort=39889, infoSecurePort=0, ipcPort=46375, storageInfo=lv=-57;cid=testClusterID;nsid=2081185996;c=1731746181275), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:36:23,431 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2b93f983684cbee5 with lease ID 0x391bdc0e3e5be9f8: Processing first storage report for DS-e5a61d2e-5f81-4885-8e67-ae73704872f3 from datanode DatanodeRegistration(127.0.0.1:40607, datanodeUuid=48b837a4-0a92-43bc-9fc8-357ad1fbdf4d, infoPort=39889, infoSecurePort=0, ipcPort=46375, storageInfo=lv=-57;cid=testClusterID;nsid=2081185996;c=1731746181275) 2024-11-16T08:36:23,431 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2b93f983684cbee5 with lease ID 0x391bdc0e3e5be9f8: from storage DS-e5a61d2e-5f81-4885-8e67-ae73704872f3 node DatanodeRegistration(127.0.0.1:40607, datanodeUuid=48b837a4-0a92-43bc-9fc8-357ad1fbdf4d, infoPort=39889, infoSecurePort=0, ipcPort=46375, storageInfo=lv=-57;cid=testClusterID;nsid=2081185996;c=1731746181275), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:36:23,473 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215 2024-11-16T08:36:23,477 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/cluster_32ee417c-0dfe-af80-26d2-5b9974ad3a0f/zookeeper_0, clientPort=59632, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/cluster_32ee417c-0dfe-af80-26d2-5b9974ad3a0f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/cluster_32ee417c-0dfe-af80-26d2-5b9974ad3a0f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T08:36:23,478 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59632 2024-11-16T08:36:23,478 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:36:23,480 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:36:23,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741825_1001 (size=7) 2024-11-16T08:36:23,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40607 is added to blk_1073741825_1001 (size=7) 2024-11-16T08:36:23,493 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087 with version=8 2024-11-16T08:36:23,493 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/hbase-staging 2024-11-16T08:36:23,495 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c27dd56784bd:0 server-side Connection retries=45 2024-11-16T08:36:23,495 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:36:23,495 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T08:36:23,495 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T08:36:23,495 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:36:23,495 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T08:36:23,495 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T08:36:23,495 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T08:36:23,496 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34751 2024-11-16T08:36:23,498 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34751 connecting to ZooKeeper ensemble=127.0.0.1:59632 2024-11-16T08:36:23,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:347510x0, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T08:36:23,570 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34751-0x10142c9b4e00000 connected 2024-11-16T08:36:23,663 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:36:23,668 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:36:23,675 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:36:23,675 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087, hbase.cluster.distributed=false 2024-11-16T08:36:23,677 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T08:36:23,678 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34751 2024-11-16T08:36:23,678 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34751 2024-11-16T08:36:23,678 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34751 2024-11-16T08:36:23,679 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34751 2024-11-16T08:36:23,679 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34751 2024-11-16T08:36:23,697 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c27dd56784bd:0 server-side Connection retries=45 2024-11-16T08:36:23,698 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:36:23,698 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T08:36:23,698 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T08:36:23,698 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:36:23,698 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T08:36:23,698 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T08:36:23,698 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T08:36:23,699 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:39115 2024-11-16T08:36:23,700 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39115 connecting to ZooKeeper ensemble=127.0.0.1:59632 2024-11-16T08:36:23,701 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:36:23,703 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:36:23,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:391150x0, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T08:36:23,714 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:36:23,714 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39115-0x10142c9b4e00001 connected 2024-11-16T08:36:23,715 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T08:36:23,715 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T08:36:23,716 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T08:36:23,717 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T08:36:23,718 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39115 2024-11-16T08:36:23,718 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39115 2024-11-16T08:36:23,718 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39115 2024-11-16T08:36:23,720 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39115 2024-11-16T08:36:23,720 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39115 2024-11-16T08:36:23,737 DEBUG [M:0;c27dd56784bd:34751 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c27dd56784bd:34751 2024-11-16T08:36:23,738 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c27dd56784bd,34751,1731746183495 2024-11-16T08:36:23,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:36:23,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:36:23,746 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c27dd56784bd,34751,1731746183495 2024-11-16T08:36:23,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:23,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T08:36:23,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:23,757 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T08:36:23,757 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c27dd56784bd,34751,1731746183495 from backup master directory 2024-11-16T08:36:23,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c27dd56784bd,34751,1731746183495 2024-11-16T08:36:23,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:36:23,766 WARN [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
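Editor's note: the ZKUtil lines above show the master and region server registering watchers on znodes such as /hbase/running, /hbase/master and /hbase/acl before those nodes exist. Outside of HBase's internal ZKUtil helper, the same pattern can be reproduced with the plain ZooKeeper client by calling exists() with a watcher, which fires once the node is later created. A minimal sketch, assuming a locally reachable quorum; the connect string and timeouts below are illustrative placeholders, not values taken from this run:

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class WatchPendingZNode {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Connect string and session timeout are placeholders for this sketch.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();

        // exists() registers the watch even when the znode is absent, which is what
        // "Set watcher on znode that does not yet exist" in the log refers to.
        zk.exists("/hbase/running", event -> {
          if (event.getType() == Watcher.Event.EventType.NodeCreated) {
            System.out.println("znode created: " + event.getPath());
          }
        });

        Thread.sleep(60_000); // keep the session alive long enough to observe the event
        zk.close();
      }
    }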
2024-11-16T08:36:23,767 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c27dd56784bd,34751,1731746183495 2024-11-16T08:36:23,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:36:23,771 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/hbase.id] with ID: fdfe7030-9694-44fd-b063-82fe2493a33c 2024-11-16T08:36:23,771 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/.tmp/hbase.id 2024-11-16T08:36:23,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741826_1002 (size=42) 2024-11-16T08:36:23,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40607 is added to blk_1073741826_1002 (size=42) 2024-11-16T08:36:23,779 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/.tmp/hbase.id]:[hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/hbase.id] 2024-11-16T08:36:23,795 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:36:23,795 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T08:36:23,797 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
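Editor's note: the FSUtils lines above publish the cluster ID by first writing hbase.id to a temporary location and then moving it to its target path, so readers never observe a partially written file. The same write-then-rename pattern can be sketched directly against the Hadoop FileSystem API; the shortened paths below are illustrative assumptions, not the exact directories from this run, and the payload reuses the cluster ID reported in the log:

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PublishClusterId {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // fs.defaultFS would normally point at the NameNode, e.g. hdfs://localhost:37163 in this log.
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");   // temporary location
        Path target = new Path("/user/jenkins/test-data/hbase.id");     // final location

        // 1. Write the content to the temporary file first.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("fdfe7030-9694-44fd-b063-82fe2493a33c".getBytes(StandardCharsets.UTF_8));
        }

        // 2. Move it into place; readers see either no file or the complete new one.
        if (!fs.rename(tmp, target)) {
          throw new java.io.IOException("rename " + tmp + " -> " + target + " failed");
        }
      }
    }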
2024-11-16T08:36:23,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:23,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:23,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40607 is added to blk_1073741827_1003 (size=196) 2024-11-16T08:36:23,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741827_1003 (size=196) 2024-11-16T08:36:23,819 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T08:36:23,820 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T08:36:23,821 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:36:23,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741828_1004 (size=1189) 2024-11-16T08:36:23,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40607 is added to blk_1073741828_1004 (size=1189) 2024-11-16T08:36:23,832 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store 2024-11-16T08:36:23,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741829_1005 (size=34) 2024-11-16T08:36:23,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40607 is added to blk_1073741829_1005 (size=34) 2024-11-16T08:36:23,840 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:36:23,840 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T08:36:23,840 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:36:23,840 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:36:23,840 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T08:36:23,841 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:36:23,841 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
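Editor's note: the descriptor dumped above for master:store is built from ordinary column-family attributes (versions, bloom filter, data block encoding, block size, in-memory flag). For reference, a comparable descriptor can be assembled with the public client API. This is only a sketch of a user-level table that mirrors the 'info' and 'proc' family settings; it is not the code HBase itself uses for the master local region, and the "demo:store" table name is a placeholder:

    import org.apache.hadoop.hbase.KeepDeletedCells;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreLikeDescriptor {
      public static TableDescriptor build() {
        // Mirrors the 'info' family: 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setKeepDeletedCells(KeepDeletedCells.FALSE)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();

        // Mirrors the 'proc' family: single version, ROW bloom, 64 KB blocks.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build();

        // Placeholder namespace and table name; master:store itself is an internal local region.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
      }
    }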
2024-11-16T08:36:23,841 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731746183840Disabling compacts and flushes for region at 1731746183840Disabling writes for close at 1731746183841 (+1 ms)Writing region close event to WAL at 1731746183841Closed at 1731746183841 2024-11-16T08:36:23,842 WARN [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/.initializing 2024-11-16T08:36:23,842 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/WALs/c27dd56784bd,34751,1731746183495 2024-11-16T08:36:23,845 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C34751%2C1731746183495, suffix=, logDir=hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/WALs/c27dd56784bd,34751,1731746183495, archiveDir=hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/oldWALs, maxLogs=10 2024-11-16T08:36:23,845 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C34751%2C1731746183495.1731746183845 2024-11-16T08:36:23,851 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/WALs/c27dd56784bd,34751,1731746183495/c27dd56784bd%2C34751%2C1731746183495.1731746183845 2024-11-16T08:36:23,852 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40781:40781),(127.0.0.1/127.0.0.1:39889:39889)] 2024-11-16T08:36:23,856 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T08:36:23,857 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:36:23,857 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:23,857 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:23,859 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:23,860 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T08:36:23,861 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:23,861 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:36:23,861 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:23,863 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T08:36:23,863 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:23,863 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:36:23,864 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:23,866 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T08:36:23,866 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:23,866 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:36:23,867 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:23,868 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T08:36:23,868 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:23,869 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:36:23,869 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:23,870 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:23,870 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:23,872 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:23,872 DEBUG [master/c27dd56784bd:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:23,873 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T08:36:23,874 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:23,877 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T08:36:23,878 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=723639, jitterRate=-0.0798463225364685}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T08:36:23,879 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731746183857Initializing all the Stores at 1731746183858 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746183858Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746183858Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746183858Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746183858Cleaning up temporary data from old regions at 1731746183872 (+14 ms)Region opened successfully at 1731746183879 (+7 ms) 2024-11-16T08:36:23,879 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T08:36:23,884 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b8a63db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c27dd56784bd/172.17.0.3:0 2024-11-16T08:36:23,886 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T08:36:23,886 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T08:36:23,886 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T08:36:23,886 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T08:36:23,887 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T08:36:23,887 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T08:36:23,887 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T08:36:23,890 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T08:36:23,891 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T08:36:23,903 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T08:36:23,904 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T08:36:23,904 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T08:36:23,914 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T08:36:23,914 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T08:36:23,915 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T08:36:23,924 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T08:36:23,926 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T08:36:23,935 DEBUG 
[master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T08:36:23,939 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T08:36:23,952 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T08:36:23,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T08:36:23,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T08:36:23,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:23,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:23,964 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c27dd56784bd,34751,1731746183495, sessionid=0x10142c9b4e00000, setting cluster-up flag (Was=false) 2024-11-16T08:36:23,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:23,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:24,019 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T08:36:24,021 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c27dd56784bd,34751,1731746183495 2024-11-16T08:36:24,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:24,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:24,072 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T08:36:24,073 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c27dd56784bd,34751,1731746183495 2024-11-16T08:36:24,075 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T08:36:24,076 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T08:36:24,077 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T08:36:24,077 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T08:36:24,077 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c27dd56784bd,34751,1731746183495 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T08:36:24,079 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:36:24,079 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:36:24,079 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:36:24,079 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:36:24,079 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c27dd56784bd:0, corePoolSize=10, maxPoolSize=10 2024-11-16T08:36:24,079 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:24,079 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c27dd56784bd:0, corePoolSize=2, maxPoolSize=2 2024-11-16T08:36:24,079 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c27dd56784bd:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T08:36:24,080 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731746214080 2024-11-16T08:36:24,080 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T08:36:24,080 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T08:36:24,080 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T08:36:24,080 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T08:36:24,080 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T08:36:24,080 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T08:36:24,081 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:24,081 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T08:36:24,081 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T08:36:24,081 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T08:36:24,081 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:36:24,081 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T08:36:24,083 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:24,083 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T08:36:24,084 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T08:36:24,084 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T08:36:24,084 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746184084,5,FailOnTimeoutGroup] 2024-11-16T08:36:24,085 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746184084,5,FailOnTimeoutGroup] 2024-11-16T08:36:24,085 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:24,085 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T08:36:24,085 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:24,085 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
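The descriptor dumped above lists the hbase:meta column families (info, ns, rep_barrier, table) and their attributes. Purely as an illustration of what those attributes mean on the client side, here is a minimal Java sketch that declares a family with the same settings through the public descriptor builders; the table name "example" is a placeholder and nothing in this sketch is part of the test run recorded in this log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static TableDescriptor build() {
        // Mirrors the attributes reported for the 'info' family above:
        // VERSIONS => 3, IN_MEMORY => true, BLOOMFILTER => ROWCOL,
        // DATA_BLOCK_ENCODING => ROW_INDEX_V1, BLOCKSIZE => 8192.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8192)
            .build();
        // "example" is a hypothetical table name, not one used in this test.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
            .setColumnFamily(info)
            .build();
      }
    }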
2024-11-16T08:36:24,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741831_1007 (size=1321) 2024-11-16T08:36:24,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40607 is added to blk_1073741831_1007 (size=1321) 2024-11-16T08:36:24,091 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T08:36:24,091 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087 2024-11-16T08:36:24,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40607 is added to blk_1073741832_1008 (size=32) 2024-11-16T08:36:24,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741832_1008 (size=32) 2024-11-16T08:36:24,099 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:36:24,101 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T08:36:24,103 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T08:36:24,103 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:24,104 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:36:24,104 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T08:36:24,106 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T08:36:24,106 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:24,107 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:36:24,108 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T08:36:24,109 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T08:36:24,109 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:24,110 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:36:24,110 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T08:36:24,112 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T08:36:24,112 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:24,113 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:36:24,113 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T08:36:24,114 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/data/hbase/meta/1588230740 2024-11-16T08:36:24,114 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/data/hbase/meta/1588230740 2024-11-16T08:36:24,115 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T08:36:24,115 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T08:36:24,116 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
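Each CompactionConfiguration entry above reports the per-store compaction tuning in effect (minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2, off-peak ratio 5.0, minCompactSize 128 MB). As a hedged sketch only, the values map onto the commonly used hbase-site keys below; the property names are an assumption about the usual configuration surface, not something printed in this log, and they can differ between HBase versions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Values taken from the CompactionConfiguration lines in this log;
        // key names are assumed, not quoted from the log.
        conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        return conf;
      }
    }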
2024-11-16T08:36:24,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T08:36:24,120 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T08:36:24,120 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=711132, jitterRate=-0.09575009346008301}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T08:36:24,122 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731746184099Initializing all the Stores at 1731746184101 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746184101Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746184101Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746184101Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746184101Cleaning up temporary data from old regions at 1731746184115 (+14 ms)Region opened successfully at 1731746184122 (+7 ms) 2024-11-16T08:36:24,122 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T08:36:24,122 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T08:36:24,122 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T08:36:24,122 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T08:36:24,122 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T08:36:24,122 INFO [RS:0;c27dd56784bd:39115 {}] regionserver.HRegionServer(746): ClusterId : fdfe7030-9694-44fd-b063-82fe2493a33c 2024-11-16T08:36:24,122 DEBUG [RS:0;c27dd56784bd:39115 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T08:36:24,122 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-11-16T08:36:24,122 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731746184122Disabling compacts and flushes for region at 1731746184122Disabling writes for close at 1731746184122Writing region close event to WAL at 1731746184122Closed at 1731746184122 2024-11-16T08:36:24,124 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:36:24,124 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T08:36:24,124 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T08:36:24,126 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T08:36:24,127 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T08:36:24,136 DEBUG [RS:0;c27dd56784bd:39115 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T08:36:24,136 DEBUG [RS:0;c27dd56784bd:39115 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T08:36:24,146 DEBUG [RS:0;c27dd56784bd:39115 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T08:36:24,147 DEBUG [RS:0;c27dd56784bd:39115 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@727f0b8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c27dd56784bd/172.17.0.3:0 2024-11-16T08:36:24,158 DEBUG [RS:0;c27dd56784bd:39115 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c27dd56784bd:39115 2024-11-16T08:36:24,159 INFO [RS:0;c27dd56784bd:39115 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T08:36:24,159 INFO [RS:0;c27dd56784bd:39115 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T08:36:24,159 DEBUG [RS:0;c27dd56784bd:39115 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-16T08:36:24,160 INFO [RS:0;c27dd56784bd:39115 {}] regionserver.HRegionServer(2659): reportForDuty to master=c27dd56784bd,34751,1731746183495 with port=39115, startcode=1731746183697 2024-11-16T08:36:24,160 DEBUG [RS:0;c27dd56784bd:39115 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T08:36:24,163 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44465, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T08:36:24,164 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34751 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c27dd56784bd,39115,1731746183697 2024-11-16T08:36:24,164 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34751 {}] master.ServerManager(517): Registering regionserver=c27dd56784bd,39115,1731746183697 2024-11-16T08:36:24,166 DEBUG [RS:0;c27dd56784bd:39115 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087 2024-11-16T08:36:24,166 DEBUG [RS:0;c27dd56784bd:39115 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37163 2024-11-16T08:36:24,166 DEBUG [RS:0;c27dd56784bd:39115 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T08:36:24,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T08:36:24,178 DEBUG [RS:0;c27dd56784bd:39115 {}] zookeeper.ZKUtil(111): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c27dd56784bd,39115,1731746183697 2024-11-16T08:36:24,178 WARN [RS:0;c27dd56784bd:39115 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T08:36:24,178 INFO [RS:0;c27dd56784bd:39115 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:36:24,178 DEBUG [RS:0;c27dd56784bd:39115 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/WALs/c27dd56784bd,39115,1731746183697 2024-11-16T08:36:24,178 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c27dd56784bd,39115,1731746183697] 2024-11-16T08:36:24,184 INFO [RS:0;c27dd56784bd:39115 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T08:36:24,186 INFO [RS:0;c27dd56784bd:39115 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T08:36:24,186 INFO [RS:0;c27dd56784bd:39115 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T08:36:24,186 INFO [RS:0;c27dd56784bd:39115 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-16T08:36:24,186 INFO [RS:0;c27dd56784bd:39115 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T08:36:24,187 INFO [RS:0;c27dd56784bd:39115 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T08:36:24,188 INFO [RS:0;c27dd56784bd:39115 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:24,188 DEBUG [RS:0;c27dd56784bd:39115 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:24,188 DEBUG [RS:0;c27dd56784bd:39115 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:24,188 DEBUG [RS:0;c27dd56784bd:39115 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:24,188 DEBUG [RS:0;c27dd56784bd:39115 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:24,188 DEBUG [RS:0;c27dd56784bd:39115 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:24,188 DEBUG [RS:0;c27dd56784bd:39115 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c27dd56784bd:0, corePoolSize=2, maxPoolSize=2 2024-11-16T08:36:24,188 DEBUG [RS:0;c27dd56784bd:39115 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:24,188 DEBUG [RS:0;c27dd56784bd:39115 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:24,189 DEBUG [RS:0;c27dd56784bd:39115 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:24,189 DEBUG [RS:0;c27dd56784bd:39115 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:24,189 DEBUG [RS:0;c27dd56784bd:39115 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:24,189 DEBUG [RS:0;c27dd56784bd:39115 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:24,189 DEBUG [RS:0;c27dd56784bd:39115 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c27dd56784bd:0, corePoolSize=3, maxPoolSize=3 2024-11-16T08:36:24,189 DEBUG [RS:0;c27dd56784bd:39115 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0, corePoolSize=3, maxPoolSize=3 2024-11-16T08:36:24,190 INFO [RS:0;c27dd56784bd:39115 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
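The repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" entries come from HBase's internal chore scheduler. The sketch below shows roughly how such a periodic chore is declared and scheduled; ScheduledChore and ChoreService are internal APIs, so treat the exact signatures as an assumption based on the 2.x/3.x code line rather than a supported client interface.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // Runs chore() once per second, like the CompactionChecker entry above.
        ScheduledChore checker = new ScheduledChore("DemoChecker", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("periodic check");
          }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(checker);
        Thread.sleep(3000);
        service.shutdown();
      }
    }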
2024-11-16T08:36:24,190 INFO [RS:0;c27dd56784bd:39115 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:24,190 INFO [RS:0;c27dd56784bd:39115 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:24,190 INFO [RS:0;c27dd56784bd:39115 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:24,191 INFO [RS:0;c27dd56784bd:39115 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:24,191 INFO [RS:0;c27dd56784bd:39115 {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,39115,1731746183697-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T08:36:24,209 INFO [RS:0;c27dd56784bd:39115 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T08:36:24,209 INFO [RS:0;c27dd56784bd:39115 {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,39115,1731746183697-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:24,209 INFO [RS:0;c27dd56784bd:39115 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:24,209 INFO [RS:0;c27dd56784bd:39115 {}] regionserver.Replication(171): c27dd56784bd,39115,1731746183697 started 2024-11-16T08:36:24,223 INFO [RS:0;c27dd56784bd:39115 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:24,223 INFO [RS:0;c27dd56784bd:39115 {}] regionserver.HRegionServer(1482): Serving as c27dd56784bd,39115,1731746183697, RpcServer on c27dd56784bd/172.17.0.3:39115, sessionid=0x10142c9b4e00001 2024-11-16T08:36:24,224 DEBUG [RS:0;c27dd56784bd:39115 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T08:36:24,224 DEBUG [RS:0;c27dd56784bd:39115 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c27dd56784bd,39115,1731746183697 2024-11-16T08:36:24,224 DEBUG [RS:0;c27dd56784bd:39115 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c27dd56784bd,39115,1731746183697' 2024-11-16T08:36:24,224 DEBUG [RS:0;c27dd56784bd:39115 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T08:36:24,225 DEBUG [RS:0;c27dd56784bd:39115 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T08:36:24,225 DEBUG [RS:0;c27dd56784bd:39115 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T08:36:24,225 DEBUG [RS:0;c27dd56784bd:39115 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T08:36:24,225 DEBUG [RS:0;c27dd56784bd:39115 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c27dd56784bd,39115,1731746183697 2024-11-16T08:36:24,225 DEBUG [RS:0;c27dd56784bd:39115 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c27dd56784bd,39115,1731746183697' 2024-11-16T08:36:24,225 DEBUG [RS:0;c27dd56784bd:39115 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T08:36:24,226 DEBUG 
[RS:0;c27dd56784bd:39115 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T08:36:24,227 DEBUG [RS:0;c27dd56784bd:39115 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T08:36:24,227 INFO [RS:0;c27dd56784bd:39115 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T08:36:24,227 INFO [RS:0;c27dd56784bd:39115 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T08:36:24,278 WARN [c27dd56784bd:34751 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-16T08:36:24,331 INFO [RS:0;c27dd56784bd:39115 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C39115%2C1731746183697, suffix=, logDir=hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/WALs/c27dd56784bd,39115,1731746183697, archiveDir=hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/oldWALs, maxLogs=32 2024-11-16T08:36:24,333 INFO [RS:0;c27dd56784bd:39115 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C39115%2C1731746183697.1731746184333 2024-11-16T08:36:24,346 INFO [RS:0;c27dd56784bd:39115 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/WALs/c27dd56784bd,39115,1731746183697/c27dd56784bd%2C39115%2C1731746183697.1731746184333 2024-11-16T08:36:24,347 DEBUG [RS:0;c27dd56784bd:39115 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39889:39889),(127.0.0.1/127.0.0.1:40781:40781)] 2024-11-16T08:36:24,528 DEBUG [c27dd56784bd:34751 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T08:36:24,529 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c27dd56784bd,39115,1731746183697 2024-11-16T08:36:24,531 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c27dd56784bd,39115,1731746183697, state=OPENING 2024-11-16T08:36:24,591 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T08:36:24,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:24,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:24,685 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T08:36:24,685 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:36:24,685 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:36:24,685 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c27dd56784bd,39115,1731746183697}] 2024-11-16T08:36:24,842 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T08:36:24,844 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54653, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T08:36:24,849 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T08:36:24,849 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:36:24,851 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C39115%2C1731746183697.meta, suffix=.meta, logDir=hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/WALs/c27dd56784bd,39115,1731746183697, archiveDir=hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/oldWALs, maxLogs=32 2024-11-16T08:36:24,853 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C39115%2C1731746183697.meta.1731746184853.meta 2024-11-16T08:36:24,860 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/WALs/c27dd56784bd,39115,1731746183697/c27dd56784bd%2C39115%2C1731746183697.meta.1731746184853.meta 2024-11-16T08:36:24,861 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39889:39889),(127.0.0.1/127.0.0.1:40781:40781)] 2024-11-16T08:36:24,861 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T08:36:24,861 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T08:36:24,862 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T08:36:24,862 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
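The WAL configuration entries above (blocksize=256 MB, rollsize=128 MB, maxLogs=32, FSHLogProvider) are derived from a handful of regionserver settings. The sketch below names the knobs usually behind those numbers; the key names are assumed rather than quoted from this log, and the roll size follows from blocksize times the logroll multiplier (256 MB x 0.5 = 128 MB).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static Configuration walTuned() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key names for the values reported in the WAL configuration line.
        conf.set("hbase.wal.provider", "filesystem");                   // FSHLogProvider
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);   // 256 MB -> 128 MB rollsize
        conf.setInt("hbase.regionserver.maxlogs", 32);                  // maxLogs
        return conf;
      }
    }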
2024-11-16T08:36:24,862 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T08:36:24,862 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:36:24,862 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T08:36:24,862 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T08:36:24,864 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T08:36:24,865 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T08:36:24,865 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:24,866 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:36:24,866 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T08:36:24,867 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T08:36:24,867 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:24,867 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:36:24,867 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T08:36:24,868 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T08:36:24,868 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:24,869 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:36:24,869 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T08:36:24,870 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T08:36:24,870 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:24,871 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-16T08:36:24,871 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T08:36:24,872 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/data/hbase/meta/1588230740 2024-11-16T08:36:24,873 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/data/hbase/meta/1588230740 2024-11-16T08:36:24,875 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T08:36:24,875 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T08:36:24,876 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T08:36:24,877 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T08:36:24,878 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=708878, jitterRate=-0.09861515462398529}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T08:36:24,879 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T08:36:24,880 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731746184862Writing region info on filesystem at 1731746184862Initializing all the Stores at 1731746184863 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746184863Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746184864 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746184864Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746184864Cleaning up temporary data from old regions at 1731746184875 (+11 ms)Running coprocessor post-open hooks at 1731746184879 (+4 ms)Region opened successfully at 1731746184880 (+1 ms) 2024-11-16T08:36:24,881 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731746184841 2024-11-16T08:36:24,884 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T08:36:24,884 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T08:36:24,885 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c27dd56784bd,39115,1731746183697 2024-11-16T08:36:24,886 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c27dd56784bd,39115,1731746183697, state=OPEN 2024-11-16T08:36:24,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T08:36:24,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T08:36:24,929 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c27dd56784bd,39115,1731746183697 2024-11-16T08:36:24,930 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:36:24,930 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:36:24,937 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T08:36:24,938 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c27dd56784bd,39115,1731746183697 in 245 msec 2024-11-16T08:36:24,941 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T08:36:24,941 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 814 msec 2024-11-16T08:36:24,942 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:36:24,942 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T08:36:24,944 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T08:36:24,944 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c27dd56784bd,39115,1731746183697, seqNum=-1] 2024-11-16T08:36:24,944 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T08:36:24,946 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60581, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T08:36:24,954 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 877 msec 2024-11-16T08:36:24,955 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731746184954, completionTime=-1 2024-11-16T08:36:24,955 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T08:36:24,955 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T08:36:24,957 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T08:36:24,957 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731746244957 2024-11-16T08:36:24,958 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731746304957 2024-11-16T08:36:24,958 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-16T08:36:24,958 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,34751,1731746183495-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:24,958 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,34751,1731746183495-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:24,958 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,34751,1731746183495-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:24,958 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c27dd56784bd:34751, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T08:36:24,959 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:24,959 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:24,961 DEBUG [master/c27dd56784bd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T08:36:24,965 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.198sec 2024-11-16T08:36:24,965 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T08:36:24,965 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T08:36:24,965 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T08:36:24,965 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T08:36:24,965 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T08:36:24,965 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,34751,1731746183495-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T08:36:24,965 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,34751,1731746183495-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T08:36:24,968 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T08:36:24,968 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T08:36:24,968 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,34751,1731746183495-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
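The client-side entries that follow (ClusterIdFetcher, ConnectionRegistry, the meta-region location lookup, and finally "Connection has been closed") are what an ordinary client connection produces. A minimal sketch of the equivalent client code, assuming hbase-site.xml or an explicit ZooKeeper quorum points at this cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientConnectionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Opening the connection fetches the cluster id and meta location,
        // matching the registry/meta-lookup entries below.
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
          System.out.println("connection ready");
        } // close() triggers the "Connection has been closed" log line
      }
    }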
2024-11-16T08:36:25,024 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57927b81, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:36:25,024 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c27dd56784bd,34751,-1 for getting cluster id 2024-11-16T08:36:25,025 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T08:36:25,030 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'fdfe7030-9694-44fd-b063-82fe2493a33c' 2024-11-16T08:36:25,031 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T08:36:25,032 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "fdfe7030-9694-44fd-b063-82fe2493a33c" 2024-11-16T08:36:25,032 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b11094c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:36:25,032 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c27dd56784bd,34751,-1] 2024-11-16T08:36:25,032 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T08:36:25,033 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:36:25,035 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60538, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T08:36:25,036 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6aa3a1e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:36:25,036 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T08:36:25,037 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c27dd56784bd,39115,1731746183697, seqNum=-1] 2024-11-16T08:36:25,038 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T08:36:25,040 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44302, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T08:36:25,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c27dd56784bd,34751,1731746183495 2024-11-16T08:36:25,043 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:36:25,046 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T08:36:25,046 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T08:36:25,046 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T08:36:25,046 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:36:25,046 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:36:25,046 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:36:25,047 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T08:36:25,047 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T08:36:25,047 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=32570933, stopped=false 2024-11-16T08:36:25,047 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c27dd56784bd,34751,1731746183495 2024-11-16T08:36:25,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T08:36:25,078 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T08:36:25,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:25,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T08:36:25,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:25,078 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
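Both "Call stack" dumps in this teardown bottom out in AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) invoking HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020). A minimal JUnit 4 sketch of that shape, where only the class and method names are taken from the stack trace and the TEST_UTIL field name is an assumption:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public abstract class LogRollingTeardownSketch {
      // Hypothetical shared test utility; the real test wires its utility up during setup.
      protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Closes the cluster connection and shuts the mini HBase/HDFS/ZK cluster down,
        // producing the AsyncConnectionImpl close and JVMClusterUtil shutdown lines seen here.
        TEST_UTIL.shutdownMiniCluster();
      }
    }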
2024-11-16T08:36:25,079 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:36:25,079 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:36:25,079 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:36:25,079 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c27dd56784bd,39115,1731746183697' ***** 2024-11-16T08:36:25,079 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T08:36:25,079 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:36:25,079 INFO [RS:0;c27dd56784bd:39115 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T08:36:25,080 INFO [RS:0;c27dd56784bd:39115 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T08:36:25,080 INFO [RS:0;c27dd56784bd:39115 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T08:36:25,080 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T08:36:25,080 INFO [RS:0;c27dd56784bd:39115 {}] regionserver.HRegionServer(959): stopping server c27dd56784bd,39115,1731746183697 2024-11-16T08:36:25,080 INFO [RS:0;c27dd56784bd:39115 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T08:36:25,080 INFO [RS:0;c27dd56784bd:39115 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c27dd56784bd:39115. 2024-11-16T08:36:25,080 DEBUG [RS:0;c27dd56784bd:39115 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:36:25,080 DEBUG [RS:0;c27dd56784bd:39115 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:36:25,080 INFO [RS:0;c27dd56784bd:39115 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
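Each "Connection has been closed by ..." stack above passes through AsyncConnectionImpl.close(AsyncConnectionImpl.java:229), i.e. an async cluster connection being closed, first on the test side and then on the region server side. For orientation, a hedged client-side sketch using the public ConnectionFactory API (the config lookup is illustrative; the ClusterIdFetcher/ConnectionRegistry traffic earlier in the log is what such a connection performs internally):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AsyncConnectionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
        // Leaving the try-with-resources block calls AsyncConnection.close(), which is the
        // call that logs "client.AsyncConnectionImpl(233): Connection has been closed by ...".
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
          System.out.println("quorum = " + conn.getConfiguration().get("hbase.zookeeper.quorum"));
        }
      }
    }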
2024-11-16T08:36:25,080 INFO [RS:0;c27dd56784bd:39115 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T08:36:25,080 INFO [RS:0;c27dd56784bd:39115 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T08:36:25,080 INFO [RS:0;c27dd56784bd:39115 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T08:36:25,081 INFO [RS:0;c27dd56784bd:39115 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-16T08:36:25,082 DEBUG [RS:0;c27dd56784bd:39115 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-16T08:36:25,082 DEBUG [RS:0;c27dd56784bd:39115 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-16T08:36:25,082 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T08:36:25,082 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T08:36:25,082 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T08:36:25,082 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T08:36:25,082 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T08:36:25,082 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-16T08:36:25,098 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/data/hbase/meta/1588230740/.tmp/ns/5ec17fb302da4a3296e72c450d8e08bd is 43, key is default/ns:d/1731746184947/Put/seqid=0 2024-11-16T08:36:25,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40607 is added to blk_1073741835_1011 (size=5153) 2024-11-16T08:36:25,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741835_1011 (size=5153) 2024-11-16T08:36:25,105 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/data/hbase/meta/1588230740/.tmp/ns/5ec17fb302da4a3296e72c450d8e08bd 2024-11-16T08:36:25,115 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/data/hbase/meta/1588230740/.tmp/ns/5ec17fb302da4a3296e72c450d8e08bd as hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/data/hbase/meta/1588230740/ns/5ec17fb302da4a3296e72c450d8e08bd 2024-11-16T08:36:25,124 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/data/hbase/meta/1588230740/ns/5ec17fb302da4a3296e72c450d8e08bd, entries=2, sequenceid=6, filesize=5.0 K 2024-11-16T08:36:25,125 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 43ms, sequenceid=6, compaction requested=false 2024-11-16T08:36:25,126 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T08:36:25,131 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T08:36:25,132 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T08:36:25,132 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T08:36:25,132 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731746185082Running coprocessor pre-close hooks at 1731746185082Disabling compacts and flushes for region at 1731746185082Disabling writes for close at 1731746185082Obtaining lock to block concurrent updates at 1731746185082Preparing flush snapshotting stores in 1588230740 at 1731746185082Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731746185083 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731746185083Flushing 1588230740/ns: creating writer at 1731746185084 (+1 ms)Flushing 1588230740/ns: appending metadata at 1731746185098 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731746185098Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33e1392b: reopening flushed file at 1731746185113 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 43ms, sequenceid=6, compaction requested=false at 1731746185125 (+12 ms)Writing region close event to WAL at 1731746185127 (+2 ms)Running coprocessor post-close hooks at 1731746185132 (+5 ms)Closed at 1731746185132 2024-11-16T08:36:25,132 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T08:36:25,194 INFO [regionserver/c27dd56784bd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T08:36:25,194 INFO [regionserver/c27dd56784bd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T08:36:25,282 INFO [RS:0;c27dd56784bd:39115 {}] regionserver.HRegionServer(976): stopping server c27dd56784bd,39115,1731746183697; all regions closed. 
2024-11-16T08:36:25,282 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:25,282 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:25,283 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:25,283 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:25,283 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:25,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741834_1010 (size=1152) 2024-11-16T08:36:25,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40607 is added to blk_1073741834_1010 (size=1152) 2024-11-16T08:36:25,287 DEBUG [RS:0;c27dd56784bd:39115 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/oldWALs 2024-11-16T08:36:25,287 INFO [RS:0;c27dd56784bd:39115 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c27dd56784bd%2C39115%2C1731746183697.meta:.meta(num 1731746184853) 2024-11-16T08:36:25,288 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:25,288 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:25,288 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:25,288 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:25,288 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:25,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741833_1009 (size=93) 2024-11-16T08:36:25,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40607 is added to blk_1073741833_1009 (size=93) 2024-11-16T08:36:25,295 DEBUG [RS:0;c27dd56784bd:39115 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/oldWALs 2024-11-16T08:36:25,295 INFO [RS:0;c27dd56784bd:39115 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c27dd56784bd%2C39115%2C1731746183697:(num 1731746184333) 2024-11-16T08:36:25,295 DEBUG [RS:0;c27dd56784bd:39115 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:36:25,295 INFO [RS:0;c27dd56784bd:39115 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T08:36:25,295 INFO [RS:0;c27dd56784bd:39115 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T08:36:25,295 INFO [RS:0;c27dd56784bd:39115 {}] hbase.ChoreService(370): Chore service for: regionserver/c27dd56784bd:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T08:36:25,296 INFO [RS:0;c27dd56784bd:39115 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T08:36:25,296 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
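The "Moved 1 WAL file(s) to .../oldWALs" and "Closed WAL: FSHLog ..." lines show the region server rolling and archiving its write-ahead logs during shutdown. Since the surrounding test is TestLogRolling, a hedged sketch of requesting a WAL roll explicitly through the Admin API may help; it assumes the standard Admin.rollWALWriter(ServerName) method, and the server name is only illustrative (shaped like the one in this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WalRollSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Illustrative server name, mirroring c27dd56784bd,39115,1731746183697 from this log.
          ServerName rs = ServerName.valueOf("c27dd56784bd", 39115, 1731746183697L);
          // Asks the region server to roll its current WAL; WAL files that are no longer
          // needed are later archived to the oldWALs directory, as in the lines above.
          admin.rollWALWriter(rs);
        }
      }
    }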
2024-11-16T08:36:25,296 INFO [RS:0;c27dd56784bd:39115 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:39115 2024-11-16T08:36:25,303 INFO [RS:0;c27dd56784bd:39115 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T08:36:25,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T08:36:25,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c27dd56784bd,39115,1731746183697 2024-11-16T08:36:25,314 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c27dd56784bd,39115,1731746183697] 2024-11-16T08:36:25,324 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c27dd56784bd,39115,1731746183697 already deleted, retry=false 2024-11-16T08:36:25,324 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c27dd56784bd,39115,1731746183697 expired; onlineServers=0 2024-11-16T08:36:25,324 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c27dd56784bd,34751,1731746183495' ***** 2024-11-16T08:36:25,324 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T08:36:25,325 INFO [M:0;c27dd56784bd:34751 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T08:36:25,325 INFO [M:0;c27dd56784bd:34751 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T08:36:25,325 DEBUG [M:0;c27dd56784bd:34751 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T08:36:25,325 DEBUG [M:0;c27dd56784bd:34751 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T08:36:25,325 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-16T08:36:25,325 DEBUG [master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746184084 {}] cleaner.HFileCleaner(306): Exit Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746184084,5,FailOnTimeoutGroup] 2024-11-16T08:36:25,325 DEBUG [master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746184084 {}] cleaner.HFileCleaner(306): Exit Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746184084,5,FailOnTimeoutGroup] 2024-11-16T08:36:25,325 INFO [M:0;c27dd56784bd:34751 {}] hbase.ChoreService(370): Chore service for: master/c27dd56784bd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T08:36:25,325 INFO [M:0;c27dd56784bd:34751 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T08:36:25,326 DEBUG [M:0;c27dd56784bd:34751 {}] master.HMaster(1795): Stopping service threads 2024-11-16T08:36:25,326 INFO [M:0;c27dd56784bd:34751 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T08:36:25,326 INFO [M:0;c27dd56784bd:34751 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T08:36:25,326 INFO [M:0;c27dd56784bd:34751 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T08:36:25,326 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T08:36:25,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T08:36:25,335 DEBUG [M:0;c27dd56784bd:34751 {}] zookeeper.ZKUtil(347): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T08:36:25,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:25,335 WARN [M:0;c27dd56784bd:34751 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T08:36:25,336 INFO [M:0;c27dd56784bd:34751 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/.lastflushedseqids 2024-11-16T08:36:25,339 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:36:25,347 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:36:25,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40607 is added to blk_1073741836_1012 (size=99) 2024-11-16T08:36:25,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741836_1012 (size=99) 2024-11-16T08:36:25,351 INFO [M:0;c27dd56784bd:34751 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T08:36:25,351 INFO [M:0;c27dd56784bd:34751 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T08:36:25,351 DEBUG [M:0;c27dd56784bd:34751 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T08:36:25,351 INFO [M:0;c27dd56784bd:34751 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:36:25,351 DEBUG [M:0;c27dd56784bd:34751 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:36:25,351 DEBUG [M:0;c27dd56784bd:34751 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T08:36:25,351 DEBUG [M:0;c27dd56784bd:34751 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:36:25,351 INFO [M:0;c27dd56784bd:34751 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-16T08:36:25,369 DEBUG [M:0;c27dd56784bd:34751 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e75dbea57563449db9e8ceb80c9ecb7f is 82, key is hbase:meta,,1/info:regioninfo/1731746184885/Put/seqid=0 2024-11-16T08:36:25,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40607 is added to blk_1073741837_1013 (size=5672) 2024-11-16T08:36:25,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741837_1013 (size=5672) 2024-11-16T08:36:25,376 INFO [M:0;c27dd56784bd:34751 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e75dbea57563449db9e8ceb80c9ecb7f 2024-11-16T08:36:25,397 DEBUG [M:0;c27dd56784bd:34751 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/13ca491836ef48caa5377c58819af87d is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731746184953/Put/seqid=0 2024-11-16T08:36:25,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741838_1014 (size=5275) 2024-11-16T08:36:25,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40607 is added to blk_1073741838_1014 (size=5275) 2024-11-16T08:36:25,403 INFO [M:0;c27dd56784bd:34751 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/13ca491836ef48caa5377c58819af87d 2024-11-16T08:36:25,414 INFO [RS:0;c27dd56784bd:39115 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T08:36:25,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:36:25,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39115-0x10142c9b4e00001, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:36:25,414 INFO [RS:0;c27dd56784bd:39115 {}] regionserver.HRegionServer(1031): Exiting; stopping=c27dd56784bd,39115,1731746183697; zookeeper connection closed. 2024-11-16T08:36:25,414 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@432e988d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@432e988d 2024-11-16T08:36:25,415 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T08:36:25,424 DEBUG [M:0;c27dd56784bd:34751 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c743357c694d46eaac2e7336201d9a82 is 69, key is c27dd56784bd,39115,1731746183697/rs:state/1731746184164/Put/seqid=0 2024-11-16T08:36:25,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741839_1015 (size=5156) 2024-11-16T08:36:25,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40607 is added to blk_1073741839_1015 (size=5156) 2024-11-16T08:36:25,430 INFO [M:0;c27dd56784bd:34751 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c743357c694d46eaac2e7336201d9a82 2024-11-16T08:36:25,450 DEBUG [M:0;c27dd56784bd:34751 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/416cd2ad99f242a6b3b36f49207643d7 is 52, key is load_balancer_on/state:d/1731746185045/Put/seqid=0 2024-11-16T08:36:25,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38815 is added to blk_1073741840_1016 (size=5056) 2024-11-16T08:36:25,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40607 is added to blk_1073741840_1016 (size=5056) 2024-11-16T08:36:25,456 INFO [M:0;c27dd56784bd:34751 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), 
to=hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/416cd2ad99f242a6b3b36f49207643d7 2024-11-16T08:36:25,463 DEBUG [M:0;c27dd56784bd:34751 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e75dbea57563449db9e8ceb80c9ecb7f as hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e75dbea57563449db9e8ceb80c9ecb7f 2024-11-16T08:36:25,470 INFO [M:0;c27dd56784bd:34751 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e75dbea57563449db9e8ceb80c9ecb7f, entries=8, sequenceid=29, filesize=5.5 K 2024-11-16T08:36:25,472 DEBUG [M:0;c27dd56784bd:34751 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/13ca491836ef48caa5377c58819af87d as hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/13ca491836ef48caa5377c58819af87d 2024-11-16T08:36:25,479 INFO [M:0;c27dd56784bd:34751 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/13ca491836ef48caa5377c58819af87d, entries=3, sequenceid=29, filesize=5.2 K 2024-11-16T08:36:25,481 DEBUG [M:0;c27dd56784bd:34751 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c743357c694d46eaac2e7336201d9a82 as hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c743357c694d46eaac2e7336201d9a82 2024-11-16T08:36:25,489 INFO [M:0;c27dd56784bd:34751 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c743357c694d46eaac2e7336201d9a82, entries=1, sequenceid=29, filesize=5.0 K 2024-11-16T08:36:25,491 DEBUG [M:0;c27dd56784bd:34751 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/416cd2ad99f242a6b3b36f49207643d7 as hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/416cd2ad99f242a6b3b36f49207643d7 2024-11-16T08:36:25,499 INFO [M:0;c27dd56784bd:34751 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37163/user/jenkins/test-data/32a97352-0bb5-4602-f761-3fa5ff5d8087/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/416cd2ad99f242a6b3b36f49207643d7, entries=1, sequenceid=29, filesize=4.9 K 2024-11-16T08:36:25,501 INFO [M:0;c27dd56784bd:34751 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 
KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=29, compaction requested=false 2024-11-16T08:36:25,502 INFO [M:0;c27dd56784bd:34751 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:36:25,502 DEBUG [M:0;c27dd56784bd:34751 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731746185351Disabling compacts and flushes for region at 1731746185351Disabling writes for close at 1731746185351Obtaining lock to block concurrent updates at 1731746185351Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731746185351Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731746185352 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731746185353 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731746185353Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731746185369 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731746185369Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731746185381 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731746185396 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731746185396Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731746185408 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731746185424 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731746185424Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731746185435 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731746185450 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731746185450Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@430cdf68: reopening flushed file at 1731746185462 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@653e2a77: reopening flushed file at 1731746185470 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17c42b91: reopening flushed file at 1731746185480 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@489eb2ef: reopening flushed file at 1731746185490 (+10 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=29, compaction requested=false at 1731746185501 (+11 ms)Writing region close event to WAL at 1731746185502 (+1 ms)Closed at 1731746185502 2024-11-16T08:36:25,503 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:25,503 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:25,503 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:25,503 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:25,503 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:25,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40607 is added to blk_1073741830_1006 (size=10311) 2024-11-16T08:36:25,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38815 is added to blk_1073741830_1006 (size=10311) 2024-11-16T08:36:25,506 INFO [M:0;c27dd56784bd:34751 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T08:36:25,506 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T08:36:25,506 INFO [M:0;c27dd56784bd:34751 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34751 2024-11-16T08:36:25,507 INFO [M:0;c27dd56784bd:34751 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T08:36:25,614 INFO [M:0;c27dd56784bd:34751 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T08:36:25,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:36:25,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34751-0x10142c9b4e00000, quorum=127.0.0.1:59632, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:36:25,616 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b5e52bc{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:36:25,616 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@882842c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:36:25,616 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:36:25,617 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@573af0f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:36:25,617 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d5e070a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/hadoop.log.dir/,STOPPED} 2024-11-16T08:36:25,618 WARN [BP-1465217183-172.17.0.3-1731746181275 heartbeating to localhost/127.0.0.1:37163 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T08:36:25,618 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T08:36:25,618 WARN [BP-1465217183-172.17.0.3-1731746181275 heartbeating to localhost/127.0.0.1:37163 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1465217183-172.17.0.3-1731746181275 (Datanode Uuid 48b837a4-0a92-43bc-9fc8-357ad1fbdf4d) service to localhost/127.0.0.1:37163 2024-11-16T08:36:25,618 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T08:36:25,618 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/cluster_32ee417c-0dfe-af80-26d2-5b9974ad3a0f/data/data3/current/BP-1465217183-172.17.0.3-1731746181275 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:36:25,619 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/cluster_32ee417c-0dfe-af80-26d2-5b9974ad3a0f/data/data4/current/BP-1465217183-172.17.0.3-1731746181275 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:36:25,619 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T08:36:25,621 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2cd60cfb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:36:25,621 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4f88e14b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:36:25,621 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:36:25,622 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e7873b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:36:25,622 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1aa9c156{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/hadoop.log.dir/,STOPPED} 2024-11-16T08:36:25,623 WARN [BP-1465217183-172.17.0.3-1731746181275 heartbeating to localhost/127.0.0.1:37163 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T08:36:25,623 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
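The two datanodes ending their block pool service here are part of the full minicluster teardown, but the same kind of shutdown is what the surrounding test (testLogRollOnDatanodeDeath) provokes when it takes a datanode away. Purely as an illustration, not taken from the test source, stopping a datanode in the mini DFS cluster can look like the following, assuming HBaseTestingUtil.getDFSCluster() and MiniDFSCluster.stopDataNode(int) keep their usual signatures:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class DatanodeStopSketch {
      // Hypothetical helper; util stands for the shared testing utility of a running minicluster.
      static void stopOneDataNode(HBaseTestingUtil util) throws Exception {
        MiniDFSCluster dfs = util.getDFSCluster();
        // Stops the first datanode; its BPServiceActor then logs
        // "Ending block pool service for: Block pool ..." much like the lines above.
        dfs.stopDataNode(0);
      }
    }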
2024-11-16T08:36:25,623 WARN [BP-1465217183-172.17.0.3-1731746181275 heartbeating to localhost/127.0.0.1:37163 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1465217183-172.17.0.3-1731746181275 (Datanode Uuid 0bcc3664-6ae7-43e4-baa8-ea12cc636143) service to localhost/127.0.0.1:37163 2024-11-16T08:36:25,623 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T08:36:25,624 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/cluster_32ee417c-0dfe-af80-26d2-5b9974ad3a0f/data/data1/current/BP-1465217183-172.17.0.3-1731746181275 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:36:25,624 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/cluster_32ee417c-0dfe-af80-26d2-5b9974ad3a0f/data/data2/current/BP-1465217183-172.17.0.3-1731746181275 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:36:25,624 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T08:36:25,630 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3fa5684d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T08:36:25,630 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7bd218c7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:36:25,630 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:36:25,630 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2152d149{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:36:25,631 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a15ed6a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/hadoop.log.dir/,STOPPED} 2024-11-16T08:36:25,636 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T08:36:25,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T08:36:25,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T08:36:25,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/hadoop.log.dir so I do NOT create it in target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78 2024-11-16T08:36:25,653 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f1490217-89f5-508b-0279-d41bd0326215/hadoop.tmp.dir so I do NOT create it in target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78 2024-11-16T08:36:25,653 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa, deleteOnExit=true 2024-11-16T08:36:25,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T08:36:25,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/test.cache.data in system properties and HBase conf 2024-11-16T08:36:25,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T08:36:25,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/hadoop.log.dir in system properties and HBase conf 2024-11-16T08:36:25,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T08:36:25,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T08:36:25,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T08:36:25,653 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T08:36:25,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T08:36:25,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T08:36:25,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T08:36:25,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T08:36:25,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T08:36:25,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T08:36:25,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T08:36:25,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T08:36:25,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T08:36:25,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/nfs.dump.dir in system properties and HBase conf 2024-11-16T08:36:25,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/java.io.tmpdir in system properties and HBase conf 2024-11-16T08:36:25,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T08:36:25,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T08:36:25,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T08:36:25,667 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T08:36:25,869 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T08:36:25,873 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:36:25,893 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:36:25,896 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:36:25,897 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:36:26,070 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:36:26,076 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:36:26,080 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:36:26,080 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:36:26,080 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T08:36:26,081 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:36:26,081 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1aa07d80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:36:26,082 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3150e6db{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:36:26,178 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2606b08f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/java.io.tmpdir/jetty-localhost-37897-hadoop-hdfs-3_4_1-tests_jar-_-any-8788077289344228085/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T08:36:26,179 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@c053989{HTTP/1.1, (http/1.1)}{localhost:37897} 2024-11-16T08:36:26,179 INFO [Time-limited test {}] server.Server(415): Started @104415ms 2024-11-16T08:36:26,191 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T08:36:26,191 INFO [regionserver/c27dd56784bd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T08:36:26,461 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:36:26,465 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:36:26,466 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:36:26,466 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:36:26,466 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T08:36:26,466 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@180ba686{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:36:26,467 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21f2acf7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:36:26,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5afc739a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/java.io.tmpdir/jetty-localhost-45315-hadoop-hdfs-3_4_1-tests_jar-_-any-3827432145799888284/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:36:26,560 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@200d0f88{HTTP/1.1, (http/1.1)}{localhost:45315} 2024-11-16T08:36:26,560 INFO [Time-limited test {}] server.Server(415): Started @104796ms 2024-11-16T08:36:26,561 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:36:26,598 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:36:26,603 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:36:26,603 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:36:26,603 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:36:26,603 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T08:36:26,604 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57ebe64c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:36:26,604 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2dc8ddff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:36:26,701 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@48b0be64{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/java.io.tmpdir/jetty-localhost-32839-hadoop-hdfs-3_4_1-tests_jar-_-any-2731487889972740413/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:36:26,702 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4cae84f2{HTTP/1.1, (http/1.1)}{localhost:32839} 2024-11-16T08:36:26,702 INFO [Time-limited test {}] server.Server(415): Started @104938ms 2024-11-16T08:36:26,704 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:36:27,662 WARN [Thread-665 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data1/current/BP-1154287115-172.17.0.3-1731746185679/current, will proceed with Du for space computation calculation, 2024-11-16T08:36:27,662 WARN [Thread-666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data2/current/BP-1154287115-172.17.0.3-1731746185679/current, will proceed with Du for space computation calculation, 2024-11-16T08:36:27,687 WARN [Thread-629 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T08:36:27,689 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x99dc978add2ecae5 with lease ID 0xfda6324f07e9c79d: Processing first storage report for DS-8c065950-89b1-42e2-9894-acfcfdb20745 from datanode DatanodeRegistration(127.0.0.1:43199, datanodeUuid=4f80ac99-a673-4253-bc8d-d38e9d3b994a, infoPort=45083, infoSecurePort=0, ipcPort=37695, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679) 2024-11-16T08:36:27,689 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x99dc978add2ecae5 with lease ID 0xfda6324f07e9c79d: from storage DS-8c065950-89b1-42e2-9894-acfcfdb20745 node DatanodeRegistration(127.0.0.1:43199, datanodeUuid=4f80ac99-a673-4253-bc8d-d38e9d3b994a, infoPort=45083, infoSecurePort=0, ipcPort=37695, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:36:27,689 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x99dc978add2ecae5 with lease ID 0xfda6324f07e9c79d: Processing first storage report for DS-696495fe-c99e-4d64-84a4-84257b7a0f4f from datanode DatanodeRegistration(127.0.0.1:43199, datanodeUuid=4f80ac99-a673-4253-bc8d-d38e9d3b994a, infoPort=45083, infoSecurePort=0, ipcPort=37695, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679) 2024-11-16T08:36:27,689 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x99dc978add2ecae5 with lease ID 0xfda6324f07e9c79d: from storage DS-696495fe-c99e-4d64-84a4-84257b7a0f4f node DatanodeRegistration(127.0.0.1:43199, datanodeUuid=4f80ac99-a673-4253-bc8d-d38e9d3b994a, infoPort=45083, infoSecurePort=0, ipcPort=37695, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:36:27,801 WARN [Thread-676 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data3/current/BP-1154287115-172.17.0.3-1731746185679/current, will proceed with Du for space computation calculation, 2024-11-16T08:36:27,801 WARN [Thread-677 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data4/current/BP-1154287115-172.17.0.3-1731746185679/current, will proceed with Du for space computation calculation, 2024-11-16T08:36:27,823 WARN [Thread-652 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T08:36:27,825 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd89169ee90c9c753 with lease ID 0xfda6324f07e9c79e: Processing first storage report for DS-af74058c-b8da-46d5-95f6-9e0ec577f137 from datanode DatanodeRegistration(127.0.0.1:34631, datanodeUuid=37c37cb5-bfce-4659-be37-7fdcb648c3f2, infoPort=43371, infoSecurePort=0, ipcPort=32773, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679) 2024-11-16T08:36:27,826 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd89169ee90c9c753 with lease ID 0xfda6324f07e9c79e: from storage DS-af74058c-b8da-46d5-95f6-9e0ec577f137 node DatanodeRegistration(127.0.0.1:34631, datanodeUuid=37c37cb5-bfce-4659-be37-7fdcb648c3f2, infoPort=43371, infoSecurePort=0, ipcPort=32773, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:36:27,826 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd89169ee90c9c753 with lease ID 0xfda6324f07e9c79e: Processing first storage report for DS-271cabbe-032c-48da-a1ca-f357b40a914c from datanode DatanodeRegistration(127.0.0.1:34631, datanodeUuid=37c37cb5-bfce-4659-be37-7fdcb648c3f2, infoPort=43371, infoSecurePort=0, ipcPort=32773, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679) 2024-11-16T08:36:27,826 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd89169ee90c9c753 with lease ID 0xfda6324f07e9c79e: from storage DS-271cabbe-032c-48da-a1ca-f357b40a914c node DatanodeRegistration(127.0.0.1:34631, datanodeUuid=37c37cb5-bfce-4659-be37-7fdcb648c3f2, infoPort=43371, infoSecurePort=0, ipcPort=32773, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:36:27,853 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78 2024-11-16T08:36:27,857 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/zookeeper_0, clientPort=54424, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T08:36:27,858 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54424 2024-11-16T08:36:27,858 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:36:27,860 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:36:27,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34631 is added to blk_1073741825_1001 (size=7) 2024-11-16T08:36:27,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43199 is added to blk_1073741825_1001 (size=7) 2024-11-16T08:36:27,871 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58 with version=8 2024-11-16T08:36:27,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/hbase-staging 2024-11-16T08:36:27,873 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c27dd56784bd:0 server-side Connection retries=45 2024-11-16T08:36:27,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:36:27,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T08:36:27,873 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T08:36:27,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:36:27,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T08:36:27,873 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T08:36:27,873 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T08:36:27,874 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44433 2024-11-16T08:36:27,876 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44433 connecting to ZooKeeper ensemble=127.0.0.1:54424 2024-11-16T08:36:27,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:444330x0, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T08:36:27,935 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44433-0x10142c9c5fb0000 connected 2024-11-16T08:36:28,019 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:36:28,021 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:36:28,023 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:36:28,023 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58, hbase.cluster.distributed=false 2024-11-16T08:36:28,025 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T08:36:28,025 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44433 2024-11-16T08:36:28,025 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44433 2024-11-16T08:36:28,026 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44433 2024-11-16T08:36:28,026 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44433 2024-11-16T08:36:28,026 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44433 2024-11-16T08:36:28,041 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c27dd56784bd:0 server-side Connection retries=45 2024-11-16T08:36:28,041 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:36:28,041 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T08:36:28,041 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T08:36:28,041 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:36:28,042 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T08:36:28,042 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T08:36:28,042 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T08:36:28,042 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34739 2024-11-16T08:36:28,044 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34739 connecting to ZooKeeper ensemble=127.0.0.1:54424 2024-11-16T08:36:28,045 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:36:28,046 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:36:28,061 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:347390x0, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T08:36:28,061 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34739-0x10142c9c5fb0001 connected 2024-11-16T08:36:28,062 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:36:28,062 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T08:36:28,062 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T08:36:28,063 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T08:36:28,064 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T08:36:28,064 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34739 2024-11-16T08:36:28,065 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34739 2024-11-16T08:36:28,065 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34739 2024-11-16T08:36:28,068 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34739 2024-11-16T08:36:28,068 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34739 2024-11-16T08:36:28,079 DEBUG [M:0;c27dd56784bd:44433 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c27dd56784bd:44433 2024-11-16T08:36:28,080 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c27dd56784bd,44433,1731746187873 2024-11-16T08:36:28,093 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:36:28,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:36:28,093 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c27dd56784bd,44433,1731746187873 2024-11-16T08:36:28,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:28,103 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T08:36:28,103 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:28,104 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T08:36:28,104 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c27dd56784bd,44433,1731746187873 from backup master directory 2024-11-16T08:36:28,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c27dd56784bd,44433,1731746187873 2024-11-16T08:36:28,114 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:36:28,114 WARN [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
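The ZKUtil(113) and ZKWatcher(609) entries above show watches being registered on znodes such as /hbase/master and /hbase/backup-masters, in some cases before those znodes exist. Below is a minimal sketch of that watch-and-check pattern using the stock Apache ZooKeeper client rather than HBase's ZKUtil, assuming the ensemble address 127.0.0.1:54424 and the 30000 ms timeout reported in this log; the class name and the short sleep are only for the demo.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class HBaseZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Ensemble address and timeout taken from the log above; adjust for your own run.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:54424", 30000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
          System.out.println("watch fired: type=" + event.getType() + " path=" + event.getPath());
        });
        connected.await();

        // Like ZKUtil(113): a watch can be set even when the znode does not exist yet;
        // exists() returns null, but the watch still fires once /hbase/master is created.
        System.out.println("/hbase/master present: " + (zk.exists("/hbase/master", true) != null));

        // The backup-masters parent is persistent once a master has registered, so listing is safe here.
        System.out.println("backup masters: " + zk.getChildren("/hbase/backup-masters", true));

        Thread.sleep(5_000); // keep the session open briefly so creation events can be observed
        zk.close();
      }
    }

exists(path, true) returns null for an absent znode but still leaves a watch behind, which is exactly the "Set watcher on znode that does not yet exist" behaviour logged by ZKUtil.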
2024-11-16T08:36:28,114 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c27dd56784bd,44433,1731746187873 2024-11-16T08:36:28,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:36:28,118 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/hbase.id] with ID: e1a9cf6e-c433-4f5b-80ec-552900008b48 2024-11-16T08:36:28,118 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/.tmp/hbase.id 2024-11-16T08:36:28,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43199 is added to blk_1073741826_1002 (size=42) 2024-11-16T08:36:28,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34631 is added to blk_1073741826_1002 (size=42) 2024-11-16T08:36:28,125 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/.tmp/hbase.id]:[hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/hbase.id] 2024-11-16T08:36:28,137 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:36:28,137 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T08:36:28,138 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
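The FSUtils(620), FSUtils(625) and FSUtils(634) entries above describe the cluster ID being written to .tmp/hbase.id and then moved to its final hbase.id location. Here is a sketch of that write-to-a-temporary-path-then-rename idiom with the plain Hadoop FileSystem API; the NameNode address, directory and UUID are copied from the log, the class name is invented, and the real FSUtils stores a serialized ClusterId rather than the bare text used here.

    import java.io.IOException;
    import java.net.URI;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // NameNode address copied from the log; adjust for your environment.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34591"), conf);

        Path rootDir = new Path("/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58");
        Path tmpId   = new Path(rootDir, ".tmp/hbase.id");
        Path realId  = new Path(rootDir, "hbase.id");

        // Write the ID to a temporary file first...
        try (FSDataOutputStream out = fs.create(tmpId, true)) {
          out.write("e1a9cf6e-c433-4f5b-80ec-552900008b48".getBytes(StandardCharsets.UTF_8));
        }
        // ...then move it into place so readers never observe a half-written file.
        if (!fs.rename(tmpId, realId)) {
          throw new IOException("rename failed: " + tmpId + " -> " + realId);
        }
        fs.close();
      }
    }

Because the rename stays within one HDFS namespace it is atomic, so a reader of hbase.id sees either nothing or the complete file, never a partial write.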
2024-11-16T08:36:28,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:28,151 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:28,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43199 is added to blk_1073741827_1003 (size=196) 2024-11-16T08:36:28,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34631 is added to blk_1073741827_1003 (size=196) 2024-11-16T08:36:28,160 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T08:36:28,161 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T08:36:28,162 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:36:28,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43199 is added to blk_1073741828_1004 (size=1189) 2024-11-16T08:36:28,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34631 is added to blk_1073741828_1004 (size=1189) 2024-11-16T08:36:28,174 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store 2024-11-16T08:36:28,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43199 is added to blk_1073741829_1005 (size=34) 2024-11-16T08:36:28,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34631 is added to blk_1073741829_1005 (size=34) 2024-11-16T08:36:28,183 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:36:28,183 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T08:36:28,183 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:36:28,183 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:36:28,183 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T08:36:28,183 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:36:28,183 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
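The MasterRegion(370) and HRegion(7590) entries above print the schema of the local 'master:store' region: an 'info' family with three versions, ROW_INDEX_V1 encoding, a ROWCOL bloom filter, in-memory caching and 8 KB blocks, alongside 'proc', 'rs' and 'state' families with single-version, 64 KB-block defaults. Roughly the same descriptor can be reconstructed with the public HBase client builders; the sketch below is derived from the logged attributes, is not the code MasterRegionFactory actually runs, and spells out only two of the four families.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        // 'info' as logged: 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();

        // 'proc' (and likewise 'rs' and 'state') as logged: 1 version, ROW bloom, 64 KB blocks.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build();

        TableDescriptor masterStore = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master:store"))
            .setColumnFamily(info)
            .setColumnFamily(proc) // 'rs' and 'state' would be added the same way
            .build();

        System.out.println(masterStore);
      }
    }

ROW_INDEX_V1 encoding plus a ROWCOL bloom filter on 'info' is a point-lookup-oriented configuration, which fits a small bookkeeping table that the master reads by row key.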
2024-11-16T08:36:28,183 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731746188183Disabling compacts and flushes for region at 1731746188183Disabling writes for close at 1731746188183Writing region close event to WAL at 1731746188183Closed at 1731746188183 2024-11-16T08:36:28,184 WARN [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/.initializing 2024-11-16T08:36:28,184 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/WALs/c27dd56784bd,44433,1731746187873 2024-11-16T08:36:28,187 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C44433%2C1731746187873, suffix=, logDir=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/WALs/c27dd56784bd,44433,1731746187873, archiveDir=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/oldWALs, maxLogs=10 2024-11-16T08:36:28,188 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C44433%2C1731746187873.1731746188187 2024-11-16T08:36:28,193 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/WALs/c27dd56784bd,44433,1731746187873/c27dd56784bd%2C44433%2C1731746187873.1731746188187 2024-11-16T08:36:28,195 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43371:43371),(127.0.0.1/127.0.0.1:45083:45083)] 2024-11-16T08:36:28,196 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T08:36:28,196 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:36:28,196 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:28,196 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:28,198 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:28,200 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T08:36:28,200 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:28,200 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:36:28,200 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:28,202 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T08:36:28,202 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:28,202 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:36:28,202 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:28,203 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T08:36:28,203 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:28,203 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:36:28,204 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:28,205 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T08:36:28,205 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:28,205 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:36:28,205 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:28,206 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:28,207 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:28,208 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:28,208 DEBUG [master/c27dd56784bd:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:28,209 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T08:36:28,210 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:36:28,213 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T08:36:28,213 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=873385, jitterRate=0.11056670546531677}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T08:36:28,214 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731746188196Initializing all the Stores at 1731746188197 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746188197Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746188198 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746188198Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746188198Cleaning up temporary data from old regions at 1731746188208 (+10 ms)Region opened successfully at 1731746188214 (+6 ms) 2024-11-16T08:36:28,215 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T08:36:28,218 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c0eeae7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c27dd56784bd/172.17.0.3:0 2024-11-16T08:36:28,220 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T08:36:28,220 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T08:36:28,220 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T08:36:28,220 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T08:36:28,221 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T08:36:28,221 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T08:36:28,221 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T08:36:28,224 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T08:36:28,225 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T08:36:28,236 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T08:36:28,236 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T08:36:28,237 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T08:36:28,246 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T08:36:28,246 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T08:36:28,248 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T08:36:28,256 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T08:36:28,258 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T08:36:28,267 DEBUG 
[master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T08:36:28,269 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T08:36:28,282 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T08:36:28,292 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T08:36:28,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T08:36:28,292 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:28,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:28,293 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c27dd56784bd,44433,1731746187873, sessionid=0x10142c9c5fb0000, setting cluster-up flag (Was=false) 2024-11-16T08:36:28,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:28,314 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:28,345 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T08:36:28,347 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c27dd56784bd,44433,1731746187873 2024-11-16T08:36:28,366 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:28,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:28,398 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T08:36:28,399 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c27dd56784bd,44433,1731746187873 2024-11-16T08:36:28,400 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T08:36:28,402 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T08:36:28,402 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T08:36:28,403 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T08:36:28,403 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c27dd56784bd,44433,1731746187873 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T08:36:28,404 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:36:28,404 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:36:28,404 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:36:28,405 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:36:28,405 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c27dd56784bd:0, corePoolSize=10, maxPoolSize=10 2024-11-16T08:36:28,405 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:28,405 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c27dd56784bd:0, corePoolSize=2, maxPoolSize=2 2024-11-16T08:36:28,405 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c27dd56784bd:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T08:36:28,408 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:36:28,408 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T08:36:28,408 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731746218408 2024-11-16T08:36:28,408 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T08:36:28,408 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T08:36:28,408 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T08:36:28,409 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T08:36:28,409 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T08:36:28,409 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T08:36:28,409 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
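Annotation: the "Initialize cleaner=..." and "Chore ScheduledChore name=LogsCleaner" lines above come from the master's log-cleaner chore building its plugin chain. A minimal sketch of how such a chain is typically configured is shown below; the property names (hbase.master.logcleaner.plugins, hbase.master.logcleaner.ttl) are quoted from memory, not from this log, and should be checked against the HBase reference guide. The cleaner class names themselves are taken verbatim from the entries above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerChainSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed key: comma-separated plugin list behind the "Initialize cleaner=..." lines above.
            conf.set("hbase.master.logcleaner.plugins",
                "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
              + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
            // Assumed key: how long TimeToLiveLogCleaner keeps old WALs, in milliseconds.
            conf.setLong("hbase.master.logcleaner.ttl", 600_000L);
            System.out.println(conf.get("hbase.master.logcleaner.plugins"));
        }
    }
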
2024-11-16T08:36:28,409 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:28,409 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T08:36:28,409 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T08:36:28,409 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T08:36:28,409 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T08:36:28,410 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T08:36:28,410 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T08:36:28,410 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746188410,5,FailOnTimeoutGroup] 2024-11-16T08:36:28,410 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746188410,5,FailOnTimeoutGroup] 2024-11-16T08:36:28,410 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:28,410 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T08:36:28,410 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:28,410 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:28,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43199 is added to blk_1073741831_1007 (size=1321) 2024-11-16T08:36:28,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34631 is added to blk_1073741831_1007 (size=1321) 2024-11-16T08:36:28,419 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T08:36:28,419 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58 2024-11-16T08:36:28,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34631 is added to blk_1073741832_1008 (size=32) 2024-11-16T08:36:28,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43199 is added to blk_1073741832_1008 (size=32) 2024-11-16T08:36:28,471 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer(746): ClusterId : e1a9cf6e-c433-4f5b-80ec-552900008b48 2024-11-16T08:36:28,471 DEBUG [RS:0;c27dd56784bd:34739 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T08:36:28,483 DEBUG 
[RS:0;c27dd56784bd:34739 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T08:36:28,483 DEBUG [RS:0;c27dd56784bd:34739 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T08:36:28,493 DEBUG [RS:0;c27dd56784bd:34739 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T08:36:28,494 DEBUG [RS:0;c27dd56784bd:34739 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f44dd06, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c27dd56784bd/172.17.0.3:0 2024-11-16T08:36:28,504 DEBUG [RS:0;c27dd56784bd:34739 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c27dd56784bd:34739 2024-11-16T08:36:28,504 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T08:36:28,504 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T08:36:28,504 DEBUG [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-16T08:36:28,505 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer(2659): reportForDuty to master=c27dd56784bd,44433,1731746187873 with port=34739, startcode=1731746188041 2024-11-16T08:36:28,505 DEBUG [RS:0;c27dd56784bd:34739 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T08:36:28,507 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42265, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T08:36:28,508 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44433 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c27dd56784bd,34739,1731746188041 2024-11-16T08:36:28,508 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44433 {}] master.ServerManager(517): Registering regionserver=c27dd56784bd,34739,1731746188041 2024-11-16T08:36:28,510 DEBUG [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58 2024-11-16T08:36:28,510 DEBUG [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34591 2024-11-16T08:36:28,510 DEBUG [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T08:36:28,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T08:36:28,520 DEBUG [RS:0;c27dd56784bd:34739 {}] zookeeper.ZKUtil(111): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c27dd56784bd,34739,1731746188041 2024-11-16T08:36:28,520 WARN [RS:0;c27dd56784bd:34739 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
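Annotation: two of the knobs surfaced in the startup messages above are plain Configuration properties. The master reported that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a value > 0, and earlier it logged hbase.normalizer.merge.min_region_size.mb being updated from 0 to 1. A minimal sketch of setting both before starting a cluster or client follows; the numeric values are illustrative only, and the class/method names outside the HBase API are made up for the example.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StartupTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Enable reopening of regions whose store file reference count exceeds the threshold;
            // the log above notes the feature is disabled while this stays <= 0. Value 3 is arbitrary.
            conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
            // Minimum region size (MB) the normalizer considers for merges, matching the
            // "from 0 to 1" update the master reported above.
            conf.setInt("hbase.normalizer.merge.min_region_size.mb", 1);
            System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
        }
    }
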
2024-11-16T08:36:28,521 INFO [RS:0;c27dd56784bd:34739 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:36:28,521 DEBUG [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041 2024-11-16T08:36:28,521 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c27dd56784bd,34739,1731746188041] 2024-11-16T08:36:28,525 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T08:36:28,527 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T08:36:28,527 INFO [RS:0;c27dd56784bd:34739 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T08:36:28,528 INFO [RS:0;c27dd56784bd:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:28,528 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T08:36:28,529 INFO [RS:0;c27dd56784bd:34739 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T08:36:28,529 INFO [RS:0;c27dd56784bd:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:28,529 DEBUG [RS:0;c27dd56784bd:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:28,529 DEBUG [RS:0;c27dd56784bd:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:28,529 DEBUG [RS:0;c27dd56784bd:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:28,529 DEBUG [RS:0;c27dd56784bd:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:28,529 DEBUG [RS:0;c27dd56784bd:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:28,529 DEBUG [RS:0;c27dd56784bd:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c27dd56784bd:0, corePoolSize=2, maxPoolSize=2 2024-11-16T08:36:28,529 DEBUG [RS:0;c27dd56784bd:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:28,530 DEBUG [RS:0;c27dd56784bd:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:28,530 DEBUG [RS:0;c27dd56784bd:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c27dd56784bd:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T08:36:28,530 DEBUG [RS:0;c27dd56784bd:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:28,530 DEBUG [RS:0;c27dd56784bd:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:28,530 DEBUG [RS:0;c27dd56784bd:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:28,530 DEBUG [RS:0;c27dd56784bd:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c27dd56784bd:0, corePoolSize=3, maxPoolSize=3 2024-11-16T08:36:28,530 DEBUG [RS:0;c27dd56784bd:34739 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0, corePoolSize=3, maxPoolSize=3 2024-11-16T08:36:28,531 INFO [RS:0;c27dd56784bd:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:28,531 INFO [RS:0;c27dd56784bd:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:28,531 INFO [RS:0;c27dd56784bd:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:28,531 INFO [RS:0;c27dd56784bd:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:28,531 INFO [RS:0;c27dd56784bd:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:28,531 INFO [RS:0;c27dd56784bd:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,34739,1731746188041-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T08:36:28,546 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T08:36:28,546 INFO [RS:0;c27dd56784bd:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,34739,1731746188041-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:28,546 INFO [RS:0;c27dd56784bd:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:28,546 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.Replication(171): c27dd56784bd,34739,1731746188041 started 2024-11-16T08:36:28,560 INFO [RS:0;c27dd56784bd:34739 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T08:36:28,560 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer(1482): Serving as c27dd56784bd,34739,1731746188041, RpcServer on c27dd56784bd/172.17.0.3:34739, sessionid=0x10142c9c5fb0001 2024-11-16T08:36:28,561 DEBUG [RS:0;c27dd56784bd:34739 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T08:36:28,561 DEBUG [RS:0;c27dd56784bd:34739 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c27dd56784bd,34739,1731746188041 2024-11-16T08:36:28,561 DEBUG [RS:0;c27dd56784bd:34739 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c27dd56784bd,34739,1731746188041' 2024-11-16T08:36:28,561 DEBUG [RS:0;c27dd56784bd:34739 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T08:36:28,561 DEBUG [RS:0;c27dd56784bd:34739 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T08:36:28,562 DEBUG [RS:0;c27dd56784bd:34739 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T08:36:28,562 DEBUG [RS:0;c27dd56784bd:34739 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T08:36:28,562 DEBUG [RS:0;c27dd56784bd:34739 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c27dd56784bd,34739,1731746188041 2024-11-16T08:36:28,562 DEBUG [RS:0;c27dd56784bd:34739 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c27dd56784bd,34739,1731746188041' 2024-11-16T08:36:28,562 DEBUG [RS:0;c27dd56784bd:34739 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T08:36:28,563 DEBUG [RS:0;c27dd56784bd:34739 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T08:36:28,563 DEBUG [RS:0;c27dd56784bd:34739 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T08:36:28,563 INFO [RS:0;c27dd56784bd:34739 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T08:36:28,563 INFO [RS:0;c27dd56784bd:34739 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
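Annotation: both quota managers above report "Quota support disabled", which is the default. The switch usually used to turn quotas on is hbase.quota.enabled; that key does not appear in this log, so treat it as an assumption to verify against the HBase documentation.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaSwitchSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed key (not shown in the log): enables RPC and space quota support,
            // which the RegionServerRpcQuotaManager / RegionServerSpaceQuotaManager lines
            // above report as disabled by default.
            conf.setBoolean("hbase.quota.enabled", true);
            System.out.println(conf.getBoolean("hbase.quota.enabled", false));
        }
    }
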
2024-11-16T08:36:28,666 INFO [RS:0;c27dd56784bd:34739 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C34739%2C1731746188041, suffix=, logDir=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041, archiveDir=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/oldWALs, maxLogs=32 2024-11-16T08:36:28,668 INFO [RS:0;c27dd56784bd:34739 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C34739%2C1731746188041.1731746188667 2024-11-16T08:36:28,675 INFO [RS:0;c27dd56784bd:34739 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746188667 2024-11-16T08:36:28,677 DEBUG [RS:0;c27dd56784bd:34739 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43371:43371),(127.0.0.1/127.0.0.1:45083:45083)] 2024-11-16T08:36:28,830 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:36:28,832 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T08:36:28,835 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T08:36:28,835 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:28,836 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:36:28,836 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T08:36:28,839 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T08:36:28,839 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:28,840 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:36:28,840 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T08:36:28,843 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T08:36:28,843 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:28,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:36:28,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T08:36:28,846 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T08:36:28,846 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:28,847 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:36:28,847 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T08:36:28,848 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740 2024-11-16T08:36:28,848 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740 2024-11-16T08:36:28,850 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T08:36:28,850 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T08:36:28,850 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T08:36:28,852 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T08:36:28,854 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T08:36:28,855 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=862287, jitterRate=0.0964546650648117}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T08:36:28,856 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731746188830Initializing all the Stores at 1731746188831 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746188831Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746188832 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746188832Instantiating store for column family {NAME => 'table', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746188832Cleaning up temporary data from old regions at 1731746188850 (+18 ms)Region opened successfully at 1731746188856 (+6 ms) 2024-11-16T08:36:28,856 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T08:36:28,856 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T08:36:28,856 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T08:36:28,856 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T08:36:28,856 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T08:36:28,856 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T08:36:28,857 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731746188856Disabling compacts and flushes for region at 1731746188856Disabling writes for close at 1731746188856Writing region close event to WAL at 1731746188856Closed at 1731746188856 2024-11-16T08:36:28,858 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:36:28,858 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T08:36:28,858 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T08:36:28,860 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T08:36:28,861 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T08:36:29,012 DEBUG [c27dd56784bd:44433 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T08:36:29,012 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c27dd56784bd,34739,1731746188041 2024-11-16T08:36:29,014 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c27dd56784bd,34739,1731746188041, state=OPENING 2024-11-16T08:36:29,061 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T08:36:29,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase 2024-11-16T08:36:29,071 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:36:29,072 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T08:36:29,072 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:36:29,072 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:36:29,072 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c27dd56784bd,34739,1731746188041}] 2024-11-16T08:36:29,226 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T08:36:29,228 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50987, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T08:36:29,232 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T08:36:29,233 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:36:29,235 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C34739%2C1731746188041.meta, suffix=.meta, logDir=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041, archiveDir=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/oldWALs, maxLogs=32 2024-11-16T08:36:29,236 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta 2024-11-16T08:36:29,242 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta 2024-11-16T08:36:29,243 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43371:43371),(127.0.0.1/127.0.0.1:45083:45083)] 2024-11-16T08:36:29,243 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T08:36:29,244 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading 
coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T08:36:29,244 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T08:36:29,244 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-16T08:36:29,244 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T08:36:29,244 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:36:29,244 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T08:36:29,244 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T08:36:29,247 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T08:36:29,249 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T08:36:29,249 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:29,249 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:36:29,250 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T08:36:29,251 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T08:36:29,251 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:29,251 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:36:29,251 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T08:36:29,252 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T08:36:29,252 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:29,253 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:36:29,253 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T08:36:29,254 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T08:36:29,254 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:29,255 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:36:29,255 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T08:36:29,256 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740 2024-11-16T08:36:29,258 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740 2024-11-16T08:36:29,260 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T08:36:29,260 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T08:36:29,261 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
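Annotation: the FlushLargeStoresPolicy line above notes that hbase:meta has no hbase.hregion.percolumnfamilyflush.size.lower.bound in its descriptor, so the region falls back to memstore-flush-size divided by the number of families (16 MB here, matching flushSizeLowerBound=16777216 earlier in the log). The sketch below shows where that property lives, set on a hypothetical user table rather than on hbase:meta; the table and family names are placeholders.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBoundSketch {
        public static void main(String[] args) {
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example_table"))   // placeholder table
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                // Lower bound (bytes) below which a single column family is not flushed on its own;
                // when absent, the fallback described in the log entry above applies.
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                          String.valueOf(16L * 1024 * 1024))
                .build();
            System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
        }
    }
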
2024-11-16T08:36:29,263 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T08:36:29,264 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=883254, jitterRate=0.12311585247516632}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T08:36:29,264 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T08:36:29,265 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731746189245Writing region info on filesystem at 1731746189245Initializing all the Stores at 1731746189246 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746189246Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746189247 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746189247Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746189247Cleaning up temporary data from old regions at 1731746189260 (+13 ms)Running coprocessor post-open hooks at 1731746189264 (+4 ms)Region opened successfully at 1731746189265 (+1 ms) 2024-11-16T08:36:29,267 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731746189226 2024-11-16T08:36:29,270 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T08:36:29,270 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T08:36:29,272 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=c27dd56784bd,34739,1731746188041 2024-11-16T08:36:29,273 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c27dd56784bd,34739,1731746188041, state=OPEN 2024-11-16T08:36:29,359 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T08:36:29,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T08:36:29,359 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c27dd56784bd,34739,1731746188041 2024-11-16T08:36:29,360 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:36:29,360 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:36:29,364 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T08:36:29,364 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c27dd56784bd,34739,1731746188041 in 288 msec 2024-11-16T08:36:29,368 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T08:36:29,368 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 506 msec 2024-11-16T08:36:29,369 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:36:29,369 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T08:36:29,371 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T08:36:29,371 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c27dd56784bd,34739,1731746188041, seqNum=-1] 2024-11-16T08:36:29,371 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T08:36:29,373 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50201, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T08:36:29,380 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 977 msec 2024-11-16T08:36:29,380 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731746189380, completionTime=-1 2024-11-16T08:36:29,380 INFO 
[master/c27dd56784bd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T08:36:29,381 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T08:36:29,383 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T08:36:29,383 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731746249383 2024-11-16T08:36:29,383 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731746309383 2024-11-16T08:36:29,383 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-16T08:36:29,383 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,44433,1731746187873-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:29,383 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,44433,1731746187873-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:29,383 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,44433,1731746187873-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:29,383 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c27dd56784bd:44433, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:29,383 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:29,384 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:29,385 DEBUG [master/c27dd56784bd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T08:36:29,388 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.273sec 2024-11-16T08:36:29,388 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T08:36:29,388 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T08:36:29,388 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T08:36:29,388 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-16T08:36:29,388 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T08:36:29,388 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,44433,1731746187873-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T08:36:29,388 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,44433,1731746187873-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T08:36:29,390 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T08:36:29,391 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T08:36:29,391 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,44433,1731746187873-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:29,419 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-16T08:36:29,472 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@142e8458, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:36:29,472 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c27dd56784bd,44433,-1 for getting cluster id 2024-11-16T08:36:29,472 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T08:36:29,475 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e1a9cf6e-c433-4f5b-80ec-552900008b48' 2024-11-16T08:36:29,475 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T08:36:29,475 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e1a9cf6e-c433-4f5b-80ec-552900008b48" 2024-11-16T08:36:29,476 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e639fd6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:36:29,476 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c27dd56784bd,44433,-1] 2024-11-16T08:36:29,476 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T08:36:29,477 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:36:29,480 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48670, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
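The repeated "Chore ScheduledChore name=..., period=..., unit=... is enabled" entries above come from the master registering periodic background tasks with its ChoreService. A minimal sketch of how such a task is typically defined and scheduled; the chore name, period and wrapper class here are assumptions for illustration, only ScheduledChore, ChoreService and Stoppable are real HBase types:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ExampleChore extends ScheduledChore {
  public ExampleChore(Stoppable stopper) {
    super("ExampleChore", stopper, 60000); // name, owner that can stop it, period in ms
  }

  @Override
  protected void chore() {
    // periodic work; the owning ChoreService invokes this once per period
  }

  // scheduling, given a ChoreService such as the master's:
  //   choreService.scheduleChore(new ExampleChore(owner));
}
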
2024-11-16T08:36:29,481 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ee117d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:36:29,482 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T08:36:29,484 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c27dd56784bd,34739,1731746188041, seqNum=-1] 2024-11-16T08:36:29,484 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T08:36:29,486 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37992, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T08:36:29,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c27dd56784bd,44433,1731746187873 2024-11-16T08:36:29,489 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:36:29,492 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T08:36:29,510 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c27dd56784bd:0 server-side Connection retries=45 2024-11-16T08:36:29,510 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:36:29,510 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T08:36:29,510 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T08:36:29,511 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:36:29,511 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T08:36:29,511 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T08:36:29,511 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T08:36:29,511 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44603 2024-11-16T08:36:29,513 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44603 connecting to ZooKeeper ensemble=127.0.0.1:54424 2024-11-16T08:36:29,513 INFO [Time-limited test {}] fs.HFileSystem(339): Added 
intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:36:29,515 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:36:29,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:446030x0, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T08:36:29,541 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:446030x0, quorum=127.0.0.1:54424, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-16T08:36:29,541 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-16T08:36:29,542 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44603-0x10142c9c5fb0002 connected 2024-11-16T08:36:29,544 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T08:36:29,546 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T08:36:29,546 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:44603-0x10142c9c5fb0002, quorum=127.0.0.1:54424, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T08:36:29,549 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44603-0x10142c9c5fb0002, quorum=127.0.0.1:54424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T08:36:29,551 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44603 2024-11-16T08:36:29,551 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44603 2024-11-16T08:36:29,559 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44603 2024-11-16T08:36:29,562 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44603 2024-11-16T08:36:29,562 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44603 2024-11-16T08:36:29,565 INFO [RS:1;c27dd56784bd:44603 {}] regionserver.HRegionServer(746): ClusterId : e1a9cf6e-c433-4f5b-80ec-552900008b48 2024-11-16T08:36:29,565 DEBUG [RS:1;c27dd56784bd:44603 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T08:36:29,578 DEBUG [RS:1;c27dd56784bd:44603 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T08:36:29,578 DEBUG [RS:1;c27dd56784bd:44603 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T08:36:29,588 DEBUG [RS:1;c27dd56784bd:44603 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T08:36:29,589 DEBUG [RS:1;c27dd56784bd:44603 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19849140, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c27dd56784bd/172.17.0.3:0 2024-11-16T08:36:29,606 DEBUG [RS:1;c27dd56784bd:44603 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;c27dd56784bd:44603 2024-11-16T08:36:29,606 INFO [RS:1;c27dd56784bd:44603 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T08:36:29,606 INFO [RS:1;c27dd56784bd:44603 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T08:36:29,606 DEBUG [RS:1;c27dd56784bd:44603 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-16T08:36:29,607 INFO [RS:1;c27dd56784bd:44603 {}] regionserver.HRegionServer(2659): reportForDuty to master=c27dd56784bd,44433,1731746187873 with port=44603, startcode=1731746189510 2024-11-16T08:36:29,608 DEBUG [RS:1;c27dd56784bd:44603 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T08:36:29,610 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55507, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T08:36:29,610 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44433 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c27dd56784bd,44603,1731746189510 2024-11-16T08:36:29,610 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44433 {}] master.ServerManager(517): Registering regionserver=c27dd56784bd,44603,1731746189510 2024-11-16T08:36:29,612 DEBUG [RS:1;c27dd56784bd:44603 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58 2024-11-16T08:36:29,612 DEBUG [RS:1;c27dd56784bd:44603 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34591 2024-11-16T08:36:29,612 DEBUG [RS:1;c27dd56784bd:44603 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T08:36:29,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T08:36:29,625 DEBUG [RS:1;c27dd56784bd:44603 {}] zookeeper.ZKUtil(111): regionserver:44603-0x10142c9c5fb0002, quorum=127.0.0.1:54424, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c27dd56784bd,44603,1731746189510 2024-11-16T08:36:29,626 WARN [RS:1;c27dd56784bd:44603 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
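The znode traffic above (watchers on /hbase/running and /hbase/master, plus the ephemeral registration under /hbase/rs/c27dd56784bd,44603,1731746189510) is how region servers announce themselves on the test's ZooKeeper quorum at 127.0.0.1:54424. A small sketch, using the plain ZooKeeper client with the quorum and base znode taken from the log (the session timeout and class name are arbitrary), of listing those registrations:

import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ListRegionServers {
  public static void main(String[] args) throws Exception {
    // quorum address from the log lines above; 30s session timeout is arbitrary
    ZooKeeper zk = new ZooKeeper("127.0.0.1:54424", 30000, event -> { });
    List<String> servers = zk.getChildren("/hbase/rs", false); // ephemeral RS znodes
    servers.forEach(System.out::println); // e.g. c27dd56784bd,44603,1731746189510
    zk.close();
  }
}
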
2024-11-16T08:36:29,626 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c27dd56784bd,44603,1731746189510] 2024-11-16T08:36:29,626 INFO [RS:1;c27dd56784bd:44603 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:36:29,626 DEBUG [RS:1;c27dd56784bd:44603 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510 2024-11-16T08:36:29,629 INFO [RS:1;c27dd56784bd:44603 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T08:36:29,632 INFO [RS:1;c27dd56784bd:44603 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T08:36:29,632 INFO [RS:1;c27dd56784bd:44603 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T08:36:29,632 INFO [RS:1;c27dd56784bd:44603 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:29,632 INFO [RS:1;c27dd56784bd:44603 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T08:36:29,633 INFO [RS:1;c27dd56784bd:44603 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T08:36:29,633 INFO [RS:1;c27dd56784bd:44603 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:29,634 DEBUG [RS:1;c27dd56784bd:44603 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:29,634 DEBUG [RS:1;c27dd56784bd:44603 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:29,634 DEBUG [RS:1;c27dd56784bd:44603 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:29,634 DEBUG [RS:1;c27dd56784bd:44603 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:29,634 DEBUG [RS:1;c27dd56784bd:44603 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:29,634 DEBUG [RS:1;c27dd56784bd:44603 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c27dd56784bd:0, corePoolSize=2, maxPoolSize=2 2024-11-16T08:36:29,634 DEBUG [RS:1;c27dd56784bd:44603 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:29,634 DEBUG [RS:1;c27dd56784bd:44603 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:29,634 DEBUG [RS:1;c27dd56784bd:44603 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c27dd56784bd:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T08:36:29,634 DEBUG [RS:1;c27dd56784bd:44603 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:29,634 DEBUG [RS:1;c27dd56784bd:44603 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:29,634 DEBUG [RS:1;c27dd56784bd:44603 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:36:29,634 DEBUG [RS:1;c27dd56784bd:44603 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c27dd56784bd:0, corePoolSize=3, maxPoolSize=3 2024-11-16T08:36:29,634 DEBUG [RS:1;c27dd56784bd:44603 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0, corePoolSize=3, maxPoolSize=3 2024-11-16T08:36:29,637 INFO [RS:1;c27dd56784bd:44603 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:29,637 INFO [RS:1;c27dd56784bd:44603 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:29,637 INFO [RS:1;c27dd56784bd:44603 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:29,637 INFO [RS:1;c27dd56784bd:44603 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:29,637 INFO [RS:1;c27dd56784bd:44603 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:29,637 INFO [RS:1;c27dd56784bd:44603 {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,44603,1731746189510-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T08:36:29,683 INFO [RS:1;c27dd56784bd:44603 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T08:36:29,683 INFO [RS:1;c27dd56784bd:44603 {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,44603,1731746189510-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:29,684 INFO [RS:1;c27dd56784bd:44603 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:36:29,684 INFO [RS:1;c27dd56784bd:44603 {}] regionserver.Replication(171): c27dd56784bd,44603,1731746189510 started 2024-11-16T08:36:29,699 INFO [RS:1;c27dd56784bd:44603 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
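The MemStoreFlusher limits, the compaction throughput bounds and the FSHLogProvider WAL choice logged above are all configuration-driven. A hedged sketch of expressing comparable settings on a Hadoop Configuration; the keys are standard HBase properties, but the values shown are illustrative, not necessarily what this test actually set:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ExampleRegionServerConf {
  static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // fraction of heap shared by all memstores (the log reports an 880 M global limit)
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // compaction throughput bounds in bytes/sec (log: 100 MB/s upper, 50 MB/s lower)
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    // WAL provider; FSHLogProvider in the log corresponds to the "filesystem" provider
    conf.set("hbase.wal.provider", "filesystem");
    return conf;
  }
}
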
2024-11-16T08:36:29,699 INFO [RS:1;c27dd56784bd:44603 {}] regionserver.HRegionServer(1482): Serving as c27dd56784bd,44603,1731746189510, RpcServer on c27dd56784bd/172.17.0.3:44603, sessionid=0x10142c9c5fb0002 2024-11-16T08:36:29,699 DEBUG [RS:1;c27dd56784bd:44603 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T08:36:29,699 DEBUG [RS:1;c27dd56784bd:44603 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c27dd56784bd,44603,1731746189510 2024-11-16T08:36:29,699 DEBUG [RS:1;c27dd56784bd:44603 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c27dd56784bd,44603,1731746189510' 2024-11-16T08:36:29,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;c27dd56784bd:44603,5,FailOnTimeoutGroup] 2024-11-16T08:36:29,699 DEBUG [RS:1;c27dd56784bd:44603 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T08:36:29,699 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-16T08:36:29,700 DEBUG [RS:1;c27dd56784bd:44603 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T08:36:29,700 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T08:36:29,700 DEBUG [RS:1;c27dd56784bd:44603 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T08:36:29,700 DEBUG [RS:1;c27dd56784bd:44603 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T08:36:29,700 DEBUG [RS:1;c27dd56784bd:44603 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c27dd56784bd,44603,1731746189510 2024-11-16T08:36:29,700 DEBUG [RS:1;c27dd56784bd:44603 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c27dd56784bd,44603,1731746189510' 2024-11-16T08:36:29,700 DEBUG [RS:1;c27dd56784bd:44603 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T08:36:29,701 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is c27dd56784bd,44433,1731746187873 2024-11-16T08:36:29,701 DEBUG [RS:1;c27dd56784bd:44603 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T08:36:29,701 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@302d9b0a 2024-11-16T08:36:29,701 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T08:36:29,701 DEBUG [RS:1;c27dd56784bd:44603 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T08:36:29,701 INFO [RS:1;c27dd56784bd:44603 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T08:36:29,701 INFO [RS:1;c27dd56784bd:44603 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
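The "Started new server=Thread[RS:1;c27dd56784bd:44603,...]" entry above is the test utility adding a second region server to the mini cluster before the test body runs. A minimal sketch of that pattern using the HBase 2.x-style HBaseTestingUtility API (this branch-3 log uses the renamed HBaseTestingUtil, so treat exact class and method names as assumptions to the extent they differ):

import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(1);                        // master plus one region server (RS:0)
    util.getMiniHBaseCluster().startRegionServer();  // adds RS:1, as in the log above
    // ... test body would run here against util.getConnection() ...
    util.shutdownMiniCluster();
  }
}
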
2024-11-16T08:36:29,703 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48686, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T08:36:29,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44433 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T08:36:29,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44433 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-16T08:36:29,704 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44433 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T08:36:29,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44433 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T08:36:29,708 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T08:36:29,708 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:29,709 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44433 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-16T08:36:29,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T08:36:29,713 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T08:36:29,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43199 is added to blk_1073741835_1011 (size=393) 2024-11-16T08:36:29,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34631 is added to blk_1073741835_1011 (size=393) 2024-11-16T08:36:29,722 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 500cafcb91030067761c75e0b496e280, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58 2024-11-16T08:36:29,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43199 is added to blk_1073741836_1012 (size=76) 2024-11-16T08:36:29,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34631 is added to blk_1073741836_1012 (size=76) 2024-11-16T08:36:29,729 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:36:29,729 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 500cafcb91030067761c75e0b496e280, disabling compactions & flushes 2024-11-16T08:36:29,729 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280. 2024-11-16T08:36:29,729 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280. 2024-11-16T08:36:29,729 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280. after waiting 0 ms 2024-11-16T08:36:29,729 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280. 2024-11-16T08:36:29,729 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280. 
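The create request and the two TableDescriptorChecker warnings above (MAX_FILESIZE=786432 and MEMSTORE_FLUSHSIZE=8192, both "too small") reflect a table descriptor built with deliberately tiny sizes so that flushes and log rolls happen quickly during the test. A hedged client-side sketch of issuing an equivalent create through the public Admin API; the table name, family and the two sizes come from the log, while the wrapper class and the Admin handle are assumed:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTestTable {
  static void create(Admin admin) throws IOException {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setMaxFileSize(786432)      // triggers the MAX_FILESIZE warning above
        .setMemStoreFlushSize(8192)  // triggers the MEMSTORE_FLUSHSIZE warning above
        .build();
    admin.createTable(td);           // drives the CreateTableProcedure seen as pid=4
  }
}
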
2024-11-16T08:36:29,729 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 500cafcb91030067761c75e0b496e280: Waiting for close lock at 1731746189729Disabling compacts and flushes for region at 1731746189729Disabling writes for close at 1731746189729Writing region close event to WAL at 1731746189729Closed at 1731746189729 2024-11-16T08:36:29,731 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T08:36:29,731 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731746189731"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731746189731"}]},"ts":"1731746189731"} 2024-11-16T08:36:29,734 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-16T08:36:29,736 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T08:36:29,736 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731746189736"}]},"ts":"1731746189736"} 2024-11-16T08:36:29,739 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-16T08:36:29,739 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=500cafcb91030067761c75e0b496e280, ASSIGN}] 2024-11-16T08:36:29,741 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=500cafcb91030067761c75e0b496e280, ASSIGN 2024-11-16T08:36:29,742 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=500cafcb91030067761c75e0b496e280, ASSIGN; state=OFFLINE, location=c27dd56784bd,34739,1731746188041; forceNewPlan=false, retain=false 2024-11-16T08:36:29,804 INFO [RS:1;c27dd56784bd:44603 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C44603%2C1731746189510, suffix=, logDir=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510, archiveDir=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/oldWALs, maxLogs=32 2024-11-16T08:36:29,805 INFO [RS:1;c27dd56784bd:44603 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C44603%2C1731746189510.1731746189804 2024-11-16T08:36:29,814 INFO [RS:1;c27dd56784bd:44603 {}] wal.AbstractFSWAL(991): New 
WAL /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 2024-11-16T08:36:29,815 DEBUG [RS:1;c27dd56784bd:44603 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43371:43371),(127.0.0.1/127.0.0.1:45083:45083)] 2024-11-16T08:36:29,893 INFO [c27dd56784bd:44433 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-16T08:36:29,894 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=500cafcb91030067761c75e0b496e280, regionState=OPENING, regionLocation=c27dd56784bd,34739,1731746188041 2024-11-16T08:36:29,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=500cafcb91030067761c75e0b496e280, ASSIGN because future has completed 2024-11-16T08:36:29,898 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 500cafcb91030067761c75e0b496e280, server=c27dd56784bd,34739,1731746188041}] 2024-11-16T08:36:30,061 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280. 2024-11-16T08:36:30,062 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 500cafcb91030067761c75e0b496e280, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280.', STARTKEY => '', ENDKEY => ''} 2024-11-16T08:36:30,063 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 500cafcb91030067761c75e0b496e280 2024-11-16T08:36:30,063 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:36:30,063 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 500cafcb91030067761c75e0b496e280 2024-11-16T08:36:30,063 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 500cafcb91030067761c75e0b496e280 2024-11-16T08:36:30,066 INFO [StoreOpener-500cafcb91030067761c75e0b496e280-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 500cafcb91030067761c75e0b496e280 2024-11-16T08:36:30,069 INFO [StoreOpener-500cafcb91030067761c75e0b496e280-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 500cafcb91030067761c75e0b496e280 columnFamilyName info 2024-11-16T08:36:30,069 DEBUG [StoreOpener-500cafcb91030067761c75e0b496e280-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:36:30,070 INFO [StoreOpener-500cafcb91030067761c75e0b496e280-1 {}] regionserver.HStore(327): Store=500cafcb91030067761c75e0b496e280/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:36:30,070 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 500cafcb91030067761c75e0b496e280 2024-11-16T08:36:30,072 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280 2024-11-16T08:36:30,073 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280 2024-11-16T08:36:30,073 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 500cafcb91030067761c75e0b496e280 2024-11-16T08:36:30,074 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 500cafcb91030067761c75e0b496e280 2024-11-16T08:36:30,076 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 500cafcb91030067761c75e0b496e280 2024-11-16T08:36:30,078 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T08:36:30,079 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 500cafcb91030067761c75e0b496e280; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=724374, jitterRate=-0.07891127467155457}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T08:36:30,079 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): 
Running coprocessor post-open hooks for 500cafcb91030067761c75e0b496e280 2024-11-16T08:36:30,079 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 500cafcb91030067761c75e0b496e280: Running coprocessor pre-open hook at 1731746190064Writing region info on filesystem at 1731746190064Initializing all the Stores at 1731746190065 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746190066 (+1 ms)Cleaning up temporary data from old regions at 1731746190074 (+8 ms)Running coprocessor post-open hooks at 1731746190079 (+5 ms)Region opened successfully at 1731746190079 2024-11-16T08:36:30,080 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280., pid=6, masterSystemTime=1731746190054 2024-11-16T08:36:30,083 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280. 2024-11-16T08:36:30,083 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280. 
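The desiredMaxFileSize values reported when regions open are the configured region maximum with a per-region random jitter applied, which keeps co-hosted regions from all splitting at the same moment. With the hbase.hregion.max.filesize of 786432 flagged by the earlier warning, the two opens in this log work out as:

  786432 × (1 + 0.12311585) ≈ 883254   (the hbase:meta open, jitterRate=0.1231...)
  786432 × (1 − 0.07891127) ≈ 724374   (the region opened just above, jitterRate=-0.0789...)
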
2024-11-16T08:36:30,084 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=500cafcb91030067761c75e0b496e280, regionState=OPEN, openSeqNum=2, regionLocation=c27dd56784bd,34739,1731746188041 2024-11-16T08:36:30,086 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 500cafcb91030067761c75e0b496e280, server=c27dd56784bd,34739,1731746188041 because future has completed 2024-11-16T08:36:30,090 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T08:36:30,091 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 500cafcb91030067761c75e0b496e280, server=c27dd56784bd,34739,1731746188041 in 189 msec 2024-11-16T08:36:30,094 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T08:36:30,094 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=500cafcb91030067761c75e0b496e280, ASSIGN in 352 msec 2024-11-16T08:36:30,095 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T08:36:30,095 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731746190095"}]},"ts":"1731746190095"} 2024-11-16T08:36:30,098 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-16T08:36:30,099 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T08:36:30,102 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 394 msec 2024-11-16T08:36:34,748 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T08:36:34,749 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:36:34,766 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:36:34,768 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:36:34,768 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:36:34,777 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-16T08:36:39,419 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T08:36:39,419 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-16T08:36:39,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T08:36:39,744 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-16T08:36:39,744 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-16T08:36:39,751 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T08:36:39,751 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280. 2024-11-16T08:36:39,769 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:36:39,773 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:36:39,774 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:36:39,774 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:36:39,774 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T08:36:39,774 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@fb911ed{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:36:39,775 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@719bbb9b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:36:39,870 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2f0a2519{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/java.io.tmpdir/jetty-localhost-35379-hadoop-hdfs-3_4_1-tests_jar-_-any-1788384512420563994/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:36:39,870 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@68004957{HTTP/1.1, (http/1.1)}{localhost:35379} 2024-11-16T08:36:39,870 INFO [Time-limited test {}] server.Server(415): Started @118106ms 2024-11-16T08:36:39,871 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:36:39,914 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:36:39,919 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:36:39,924 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:36:39,924 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:36:39,924 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T08:36:39,926 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64b86931{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:36:39,927 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@624ef820{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:36:40,033 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@786c904f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/java.io.tmpdir/jetty-localhost-39427-hadoop-hdfs-3_4_1-tests_jar-_-any-3888904492041676889/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:36:40,034 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@18a0c61c{HTTP/1.1, (http/1.1)}{localhost:39427} 2024-11-16T08:36:40,034 INFO [Time-limited test {}] server.Server(415): Started @118270ms 2024-11-16T08:36:40,035 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:36:40,073 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:36:40,078 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:36:40,086 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:36:40,086 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:36:40,086 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T08:36:40,087 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@339da5f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:36:40,087 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fb4bc9e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:36:40,183 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4a9b13ea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/java.io.tmpdir/jetty-localhost-43865-hadoop-hdfs-3_4_1-tests_jar-_-any-6976243571196998433/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:36:40,184 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@61f29988{HTTP/1.1, (http/1.1)}{localhost:43865} 2024-11-16T08:36:40,184 INFO [Time-limited test {}] server.Server(415): Started @118420ms 2024-11-16T08:36:40,185 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:36:41,555 WARN [Thread-860 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data5/current/BP-1154287115-172.17.0.3-1731746185679/current, will proceed with Du for space computation calculation, 2024-11-16T08:36:41,555 WARN [Thread-861 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data6/current/BP-1154287115-172.17.0.3-1731746185679/current, will proceed with Du for space computation calculation, 2024-11-16T08:36:41,573 WARN [Thread-801 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1
2024-11-16T08:36:41,575 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4416aacc5154469e with lease ID 0xfda6324f07e9c79f: Processing first storage report for DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1 from datanode DatanodeRegistration(127.0.0.1:33283, datanodeUuid=7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae, infoPort=38687, infoSecurePort=0, ipcPort=34149, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679)
2024-11-16T08:36:41,576 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4416aacc5154469e with lease ID 0xfda6324f07e9c79f: from storage DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1 node DatanodeRegistration(127.0.0.1:33283, datanodeUuid=7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae, infoPort=38687, infoSecurePort=0, ipcPort=34149, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-16T08:36:41,576 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4416aacc5154469e with lease ID 0xfda6324f07e9c79f: Processing first storage report for DS-87218a35-fa50-48b5-8634-e0c9db730bf0 from datanode DatanodeRegistration(127.0.0.1:33283, datanodeUuid=7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae, infoPort=38687, infoSecurePort=0, ipcPort=34149, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679)
2024-11-16T08:36:41,576 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4416aacc5154469e with lease ID 0xfda6324f07e9c79f: from storage DS-87218a35-fa50-48b5-8634-e0c9db730bf0 node DatanodeRegistration(127.0.0.1:33283, datanodeUuid=7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae, infoPort=38687, infoSecurePort=0, ipcPort=34149, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-16T08:36:41,750 WARN [Thread-872 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data8/current/BP-1154287115-172.17.0.3-1731746185679/current, will proceed with Du for space computation calculation,
2024-11-16T08:36:41,750 WARN [Thread-871 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data7/current/BP-1154287115-172.17.0.3-1731746185679/current, will proceed with Du for space computation calculation,
2024-11-16T08:36:41,765 WARN [Thread-823 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1
2024-11-16T08:36:41,767 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x412368719e587283 with lease ID 0xfda6324f07e9c7a0: Processing first storage report for DS-7146477d-586c-4433-a598-53ddf8328319 from datanode DatanodeRegistration(127.0.0.1:37431, datanodeUuid=7455e430-34c9-43b1-a7d2-beffba037926, infoPort=44043, infoSecurePort=0, ipcPort=36601, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679)
2024-11-16T08:36:41,767 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x412368719e587283 with lease ID 0xfda6324f07e9c7a0: from storage DS-7146477d-586c-4433-a598-53ddf8328319 node DatanodeRegistration(127.0.0.1:37431, datanodeUuid=7455e430-34c9-43b1-a7d2-beffba037926, infoPort=44043, infoSecurePort=0, ipcPort=36601, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-16T08:36:41,767 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x412368719e587283 with lease ID 0xfda6324f07e9c7a0: Processing first storage report for DS-72f7b62c-2720-4f08-8e53-b31a34f9e685 from datanode DatanodeRegistration(127.0.0.1:37431, datanodeUuid=7455e430-34c9-43b1-a7d2-beffba037926, infoPort=44043, infoSecurePort=0, ipcPort=36601, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679)
2024-11-16T08:36:41,767 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x412368719e587283 with lease ID 0xfda6324f07e9c7a0: from storage DS-72f7b62c-2720-4f08-8e53-b31a34f9e685 node DatanodeRegistration(127.0.0.1:37431, datanodeUuid=7455e430-34c9-43b1-a7d2-beffba037926, infoPort=44043, infoSecurePort=0, ipcPort=36601, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-16T08:36:41,806 WARN [Thread-882 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data9/current/BP-1154287115-172.17.0.3-1731746185679/current, will proceed with Du for space computation calculation,
2024-11-16T08:36:41,806 WARN [Thread-883 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data10/current/BP-1154287115-172.17.0.3-1731746185679/current, will proceed with Du for space computation calculation,
2024-11-16T08:36:41,828 WARN [Thread-845 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1
2024-11-16T08:36:41,831 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdd4d71ed9be22bd8 with lease ID 0xfda6324f07e9c7a1: Processing first storage report for DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a from datanode DatanodeRegistration(127.0.0.1:32829, datanodeUuid=2fbe8c03-7c67-4a22-8985-c2e9ed140576, infoPort=40857, infoSecurePort=0, ipcPort=41611, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679)
2024-11-16T08:36:41,831 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdd4d71ed9be22bd8 with lease ID 0xfda6324f07e9c7a1: from storage DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a node DatanodeRegistration(127.0.0.1:32829, datanodeUuid=2fbe8c03-7c67-4a22-8985-c2e9ed140576, infoPort=40857, infoSecurePort=0, ipcPort=41611, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-16T08:36:41,831 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdd4d71ed9be22bd8 with lease ID 0xfda6324f07e9c7a1: Processing first storage report for DS-dffb7230-40ee-4a6a-abe2-8cc02b4f5ba4 from datanode DatanodeRegistration(127.0.0.1:32829, datanodeUuid=2fbe8c03-7c67-4a22-8985-c2e9ed140576, infoPort=40857, infoSecurePort=0, ipcPort=41611, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679)
2024-11-16T08:36:41,831 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdd4d71ed9be22bd8 with lease ID 0xfda6324f07e9c7a1: from storage DS-dffb7230-40ee-4a6a-abe2-8cc02b4f5ba4 node DatanodeRegistration(127.0.0.1:32829, datanodeUuid=2fbe8c03-7c67-4a22-8985-c2e9ed140576, infoPort=40857, infoSecurePort=0, ipcPort=41611, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-16T08:36:41,919 WARN [ResponseProcessor for block BP-1154287115-172.17.0.3-1731746185679:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1154287115-172.17.0.3-1731746185679:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T08:36:41,919 WARN [ResponseProcessor for block BP-1154287115-172.17.0.3-1731746185679:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1154287115-172.17.0.3-1731746185679:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T08:36:41,919 WARN [ResponseProcessor for block BP-1154287115-172.17.0.3-1731746185679:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1154287115-172.17.0.3-1731746185679:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T08:36:41,919 WARN [ResponseProcessor for block BP-1154287115-172.17.0.3-1731746185679:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1154287115-172.17.0.3-1731746185679:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T08:36:41,921 WARN [DataStreamer for file /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/WALs/c27dd56784bd,44433,1731746187873/c27dd56784bd%2C44433%2C1731746187873.1731746188187 block BP-1154287115-172.17.0.3-1731746185679:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK], DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]) is bad.
2024-11-16T08:36:41,921 WARN [DataStreamer for file /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta block BP-1154287115-172.17.0.3-1731746185679:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK], DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]) is bad.
2024-11-16T08:36:41,921 WARN [DataStreamer for file /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746188667 block BP-1154287115-172.17.0.3-1731746185679:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK], DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]) is bad.
2024-11-16T08:36:41,922 WARN [DataStreamer for file /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 block BP-1154287115-172.17.0.3-1731746185679:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK], DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]) is bad.
2024-11-16T08:36:41,923 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:43822 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34631:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43822 dst: /127.0.0.1:34631 java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-16T08:36:41,923 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-519316165_22 at /127.0.0.1:43786 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34631:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43786 dst: /127.0.0.1:34631 java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-16T08:36:41,923 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-739149988_22 at /127.0.0.1:35912 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:43199:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35912 dst: /127.0.0.1:43199 java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-16T08:36:41,923 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:43814 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34631:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43814 dst: /127.0.0.1:34631 java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-16T08:36:41,923 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35888 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43199:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35888 dst: /127.0.0.1:43199 java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-16T08:36:41,924 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35876 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43199:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35876 dst: /127.0.0.1:43199 java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-16T08:36:41,924 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@48b0be64{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-16T08:36:41,923 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-739149988_22 at /127.0.0.1:43846 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:34631:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43846 dst: /127.0.0.1:34631 java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-16T08:36:41,925 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4cae84f2{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-16T08:36:41,925 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-16T08:36:41,925 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2dc8ddff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-16T08:36:41,925 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-519316165_22 at /127.0.0.1:35850 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43199:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35850 dst: /127.0.0.1:43199 java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-16T08:36:41,925 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57ebe64c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/hadoop.log.dir/,STOPPED}
2024-11-16T08:36:41,926 WARN [BP-1154287115-172.17.0.3-1731746185679 heartbeating to localhost/127.0.0.1:34591 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-16T08:36:41,926 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-16T08:36:41,926 WARN [BP-1154287115-172.17.0.3-1731746185679 heartbeating to localhost/127.0.0.1:34591 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1154287115-172.17.0.3-1731746185679 (Datanode Uuid 37c37cb5-bfce-4659-be37-7fdcb648c3f2) service to localhost/127.0.0.1:34591
2024-11-16T08:36:41,926 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-16T08:36:41,927 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data3/current/BP-1154287115-172.17.0.3-1731746185679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-16T08:36:41,927 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data4/current/BP-1154287115-172.17.0.3-1731746185679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-16T08:36:41,927 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-16T08:36:41,928 WARN [DataStreamer for file /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746188667 block BP-1154287115-172.17.0.3-1731746185679:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T08:36:41,928 WARN [DataStreamer for file /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta block BP-1154287115-172.17.0.3-1731746185679:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T08:36:41,929 WARN [DataStreamer for file /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 block BP-1154287115-172.17.0.3-1731746185679:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T08:36:41,929 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5afc739a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-16T08:36:41,930 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@200d0f88{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-16T08:36:41,930 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-16T08:36:41,930 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@3795971b {}] datanode.DataXceiver(331): 127.0.0.1:43199:DataXceiver error processing unknown operation src: /127.0.0.1:39614 dst: /127.0.0.1:43199 java.io.IOException: Server closed.
    at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-16T08:36:41,930 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21f2acf7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-16T08:36:41,930 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@180ba686{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/hadoop.log.dir/,STOPPED}
2024-11-16T08:36:41,930 WARN [DataStreamer for file /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/WALs/c27dd56784bd,44433,1731746187873/c27dd56784bd%2C44433%2C1731746187873.1731746188187 block BP-1154287115-172.17.0.3-1731746185679:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset
    at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?]
    at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?]
    at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?]
    at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T08:36:41,931 WARN [BP-1154287115-172.17.0.3-1731746185679 heartbeating to localhost/127.0.0.1:34591 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-16T08:36:41,931 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-16T08:36:41,931 WARN [BP-1154287115-172.17.0.3-1731746185679 heartbeating to localhost/127.0.0.1:34591 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1154287115-172.17.0.3-1731746185679 (Datanode Uuid 4f80ac99-a673-4253-bc8d-d38e9d3b994a) service to localhost/127.0.0.1:34591
2024-11-16T08:36:41,931 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-16T08:36:41,932 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data1/current/BP-1154287115-172.17.0.3-1731746185679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-16T08:36:41,932 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data2/current/BP-1154287115-172.17.0.3-1731746185679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-16T08:36:41,932 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-16T08:36:41,936 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280., hostname=c27dd56784bd,34739,1731746188041, seqNum=2]
2024-11-16T08:36:41,937 ERROR [FSHLog-0-hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58-prefix:c27dd56784bd,34739,1731746188041 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T08:36:41,937 WARN [FSHLog-0-hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58-prefix:c27dd56784bd,34739,1731746188041 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T08:36:41,938 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T08:36:41,938 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c27dd56784bd%2C34739%2C1731746188041:(num 1731746188667) roll requested
2024-11-16T08:36:41,938 INFO [regionserver/c27dd56784bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C34739%2C1731746188041.1731746201938
2024-11-16T08:36:41,943 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T08:36:41,943 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T08:36:41,943 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T08:36:41,943 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T08:36:41,943 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T08:36:41,943 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746188667 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746201938
2024-11-16T08:36:41,944 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T08:36:41,944 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T08:36:41,945 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-11-16T08:36:41,945 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-11-16T08:36:41,945 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746188667
2024-11-16T08:36:41,947 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44043:44043),(127.0.0.1/127.0.0.1:40857:40857)]
2024-11-16T08:36:41,947 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746188667 is not closed yet, will try archiving it next time
2024-11-16T08:36:41,948 WARN [IPC Server handler 3 on default port 34591 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746188667 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009
2024-11-16T08:36:41,951 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746188667 after 5ms
2024-11-16T08:36:42,163 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T08:36:43,636 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T08:36:43,947 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T08:36:43,949 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746201938
2024-11-16T08:36:43,950 WARN [ResponseProcessor for block BP-1154287115-172.17.0.3-1731746185679:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1154287115-172.17.0.3-1731746185679:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-16T08:36:43,951 WARN [DataStreamer for file /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746201938 block BP-1154287115-172.17.0.3-1731746185679:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK], DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK]) is bad.
2024-11-16T08:36:43,952 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35436 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:37431:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35436 dst: /127.0.0.1:37431 java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-16T08:36:43,953 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:44938 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:32829:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44938 dst: /127.0.0.1:32829 java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-16T08:36:43,992 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@786c904f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:36:43,993 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@18a0c61c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:36:43,994 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:36:43,994 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@624ef820{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:36:43,994 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64b86931{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/hadoop.log.dir/,STOPPED} 2024-11-16T08:36:43,997 WARN [BP-1154287115-172.17.0.3-1731746185679 heartbeating to localhost/127.0.0.1:34591 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T08:36:43,997 WARN [BP-1154287115-172.17.0.3-1731746185679 heartbeating to localhost/127.0.0.1:34591 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1154287115-172.17.0.3-1731746185679 (Datanode Uuid 7455e430-34c9-43b1-a7d2-beffba037926) service to localhost/127.0.0.1:34591 2024-11-16T08:36:43,997 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T08:36:43,997 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T08:36:43,998 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data7/current/BP-1154287115-172.17.0.3-1731746185679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:36:43,998 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data8/current/BP-1154287115-172.17.0.3-1731746185679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:36:43,998 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T08:36:44,164 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:45,637 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:45,948 WARN [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]] 2024-11-16T08:36:45,949 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:45,949 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c27dd56784bd%2C34739%2C1731746188041:(num 1731746201938) roll requested 2024-11-16T08:36:45,949 INFO [regionserver/c27dd56784bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C34739%2C1731746188041.1731746205949 2024-11-16T08:36:45,952 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746188667 after 4007ms 2024-11-16T08:36:45,952 WARN [Thread-903 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:45,952 WARN [Thread-903 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK], DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]) is bad. 2024-11-16T08:36:45,952 WARN [Thread-903 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741839_1021 2024-11-16T08:36:45,955 WARN [Thread-903 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK] 2024-11-16T08:36:45,958 WARN [Thread-903 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:45,959 WARN [Thread-903 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK], DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]) is bad. 2024-11-16T08:36:45,959 WARN [Thread-903 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741840_1022 2024-11-16T08:36:45,959 WARN [Thread-903 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK] 2024-11-16T08:36:45,961 WARN [Thread-903 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:45,961 WARN [Thread-903 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK], DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK]) is bad. 2024-11-16T08:36:45,961 WARN [Thread-903 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741841_1023 2024-11-16T08:36:45,962 WARN [Thread-903 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK] 2024-11-16T08:36:45,967 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:45,967 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:45,967 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:45,967 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:45,967 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:45,968 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746201938 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746205949 2024-11-16T08:36:45,968 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40857:40857),(127.0.0.1/127.0.0.1:38687:38687)] 2024-11-16T08:36:45,969 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746188667 is not closed yet, will try archiving it next time 2024-11-16T08:36:45,969 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746201938 is not closed yet, will try archiving it next time 2024-11-16T08:36:45,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32829 is added to blk_1073741838_1020 (size=2431) 2024-11-16T08:36:46,003 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T08:36:46,165 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:46,371 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746188667 is not closed yet, will try archiving it next time 2024-11-16T08:36:47,637 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:47,848 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@67e2c00e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:32829, datanodeUuid=2fbe8c03-7c67-4a22-8985-c2e9ed140576, infoPort=40857, infoSecurePort=0, ipcPort=41611, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679):Failed to transfer BP-1154287115-172.17.0.3-1731746185679:blk_1073741838_1020 to 127.0.0.1:43199 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:47,969 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:48,007 WARN [ResponseProcessor for block BP-1154287115-172.17.0.3-1731746185679:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1154287115-172.17.0.3-1731746185679:blk_1073741842_1024 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:48,007 WARN [DataStreamer for file /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746205949 block BP-1154287115-172.17.0.3-1731746185679:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK], DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]) is bad. 2024-11-16T08:36:48,008 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35252 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:33283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35252 dst: /127.0.0.1:33283 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
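[Editor's note] The "Failed to recover lease, attempt=1 on file=... after 4007ms" message earlier comes from HBase's RecoverLeaseFSUtils closing the previous WAL file. The underlying pattern is a retry loop around DistributedFileSystem.recoverLease(); the sketch below is a simplified stand-in for that pattern, not the HBase utility itself, and the retry interval is an arbitrary assumption.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecovery {
        // Simplified recover-then-poll pattern used when closing an old WAL file.
        public static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
                throws Exception {
            long deadline = System.currentTimeMillis() + timeoutMs;
            int attempt = 0;
            while (System.currentTimeMillis() < deadline) {
                attempt++;
                // recoverLease() returns true once the file is closed and the lease is released.
                if (dfs.recoverLease(wal)) {
                    return true;
                }
                System.out.println("Failed to recover lease, attempt=" + attempt + " on file=" + wal);
                Thread.sleep(1000L);   // retry interval is an assumption for this sketch
            }
            return false;
        }
    }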
2024-11-16T08:36:48,008 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:44954 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:32829:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44954 dst: /127.0.0.1:32829 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
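[Editor's note] The earlier FSHLog(529) warning ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL") is a low-replication check on the WAL's current write pipeline. The helper below is a hypothetical reconstruction of that decision, for illustration only; in the real code the minimum would be derived from the configured tolerable low-replication value, which is an assumption here.

    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

    public final class LowReplicationCheck {
        // Hypothetical helper: decide whether the WAL should be rolled because its
        // write pipeline has fewer live replicas than we are willing to tolerate.
        public static boolean shouldRequestRoll(DatanodeInfo[] currentPipeline, int minReplicas) {
            int replicas = currentPipeline == null ? 0 : currentPipeline.length;
            if (replicas < minReplicas) {
                System.out.println("HDFS pipeline error detected. Found " + replicas
                    + " replicas but expecting no less than " + minReplicas
                    + " replicas. Requesting close of WAL.");
                return true;
            }
            return false;
        }
    }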
2024-11-16T08:36:48,035 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4a9b13ea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:36:48,036 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@61f29988{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:36:48,036 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:36:48,036 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fb4bc9e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:36:48,036 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@339da5f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/hadoop.log.dir/,STOPPED} 2024-11-16T08:36:48,038 WARN [BP-1154287115-172.17.0.3-1731746185679 heartbeating to localhost/127.0.0.1:34591 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T08:36:48,038 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T08:36:48,038 WARN [BP-1154287115-172.17.0.3-1731746185679 heartbeating to localhost/127.0.0.1:34591 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1154287115-172.17.0.3-1731746185679 (Datanode Uuid 2fbe8c03-7c67-4a22-8985-c2e9ed140576) service to localhost/127.0.0.1:34591 2024-11-16T08:36:48,038 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T08:36:48,038 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data9/current/BP-1154287115-172.17.0.3-1731746185679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:36:48,039 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data10/current/BP-1154287115-172.17.0.3-1731746185679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:36:48,039 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T08:36:48,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34739 {}] regionserver.HRegion(8855): Flush requested on 500cafcb91030067761c75e0b496e280 2024-11-16T08:36:48,050 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 500cafcb91030067761c75e0b496e280 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T08:36:48,089 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/fb5b619a53654d96b6f3e776a9aa6409 is 1080, key is row0002/info:/1731746203999/Put/seqid=0 2024-11-16T08:36:48,094 WARN [Thread-913 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37431 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:48,094 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35276 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741843_1026] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data6]'}, localName='127.0.0.1:33283', datanodeUuid='7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae', xmitsInProgress=0}:Exception transferring block BP-1154287115-172.17.0.3-1731746185679:blk_1073741843_1026 to mirror 127.0.0.1:37431 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:48,094 WARN [Thread-913 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK], DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK]) is bad. 
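[Editor's note] The connection-refused errors while mirroring blocks to 127.0.0.1:37431 and the other excluded datanodes are expected here: TestLogRolling's testLogRollOnDatanodeDeath deliberately kills datanodes hosting WAL replicas. A hedged sketch of how such a scenario is typically set up with Hadoop's MiniDFSCluster test harness follows; the datanode count and index are illustrative assumptions, not the values used by this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class DatanodeDeathScenario {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .numDataNodes(5)          // illustrative; enough nodes to survive losing a few
                .build();
            cluster.waitActive();
            try {
                // Stop one datanode to simulate a death while WAL blocks are being written;
                // the DFS client should then go through the pipeline-recovery path seen above.
                cluster.stopDataNode(0);
            } finally {
                cluster.shutdown();
            }
        }
    }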
2024-11-16T08:36:48,094 WARN [Thread-913 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741843_1026 2024-11-16T08:36:48,094 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35276 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741843_1026] {}] datanode.BlockReceiver(316): Block 1073741843 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T08:36:48,094 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35276 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741843_1026] {}] datanode.DataXceiver(331): 127.0.0.1:33283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35276 dst: /127.0.0.1:33283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:48,095 WARN [Thread-913 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK] 2024-11-16T08:36:48,097 WARN [Thread-913 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:48,097 WARN [Thread-913 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK], DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]) is bad. 
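[Editor's note] The "roll requested" and "Rolled WAL ... with entries=2" messages earlier come from the region server's own log roller reacting to the degraded pipeline. For reference, an equivalent roll can be requested from a client or test through the Admin API; the sketch below assumes a reachable cluster, and the server name is illustrative (taken from the log above).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWal {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Server name as it appears in this log: c27dd56784bd,34739,1731746188041
                ServerName rs = ServerName.valueOf("c27dd56784bd", 34739, 1731746188041L);
                admin.rollWALWriter(rs);   // ask the region server to roll its WAL now
            }
        }
    }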
2024-11-16T08:36:48,097 WARN [Thread-913 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741844_1027 2024-11-16T08:36:48,098 WARN [Thread-913 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK] 2024-11-16T08:36:48,100 WARN [Thread-913 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:48,100 WARN [Thread-913 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK], DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]) is bad. 2024-11-16T08:36:48,100 WARN [Thread-913 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741845_1028 2024-11-16T08:36:48,101 WARN [Thread-913 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK] 2024-11-16T08:36:48,104 WARN [Thread-913 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34631 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T08:36:48,104 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35280 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data6]'}, localName='127.0.0.1:33283', datanodeUuid='7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae', xmitsInProgress=0}:Exception transferring block BP-1154287115-172.17.0.3-1731746185679:blk_1073741846_1029 to mirror 127.0.0.1:34631 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:48,105 WARN [Thread-913 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK], DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]) is bad. 2024-11-16T08:36:48,105 WARN [Thread-913 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741846_1029 2024-11-16T08:36:48,105 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35280 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T08:36:48,105 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35280 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:33283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35280 dst: /127.0.0.1:33283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:48,107 WARN [Thread-913 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK] 2024-11-16T08:36:48,109 WARN [IPC Server handler 3 on default port 34591 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T08:36:48,109 WARN [IPC Server handler 3 on default port 34591 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T08:36:48,109 WARN [IPC Server handler 3 on default port 34591 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T08:36:48,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741847_1030 (size=10347) 2024-11-16T08:36:48,165 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
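[Editor's note] The "Failed to place enough replicas, still in need of 1 to reach 2" warnings mean the namenode cannot find two acceptable datanodes for a new block once the dead ones are excluded. A hedged diagnostic sketch: compare the number of live datanodes against the requested replication before writing. getDataNodeStats() is a real DistributedFileSystem call; treating 2 as the required replication mirrors this log but is otherwise an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

    public class ReplicaHeadroom {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);
            if (fs instanceof DistributedFileSystem) {
                DatanodeInfo[] live = ((DistributedFileSystem) fs).getDataNodeStats();
                int required = conf.getInt("dfs.replication", 3);
                if (live.length < required) {
                    System.out.println("Only " + live.length + " live datanodes for replication="
                        + required + "; block placement will fall short of the target.");
                }
            }
        }
    }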
2024-11-16T08:36:48,516 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/fb5b619a53654d96b6f3e776a9aa6409 2024-11-16T08:36:48,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/fb5b619a53654d96b6f3e776a9aa6409 as hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/fb5b619a53654d96b6f3e776a9aa6409 2024-11-16T08:36:48,537 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/fb5b619a53654d96b6f3e776a9aa6409, entries=5, sequenceid=11, filesize=10.1 K 2024-11-16T08:36:48,539 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 500cafcb91030067761c75e0b496e280 in 489ms, sequenceid=11, compaction requested=false 2024-11-16T08:36:48,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 500cafcb91030067761c75e0b496e280: 2024-11-16T08:36:48,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34739 {}] regionserver.HRegion(8855): Flush requested on 500cafcb91030067761c75e0b496e280 2024-11-16T08:36:48,686 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 500cafcb91030067761c75e0b496e280 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-16T08:36:48,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/7e3580f6efc14ea492dba6e354f4f696 is 1080, key is row0007/info:/1731746208053/Put/seqid=0 2024-11-16T08:36:48,696 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34631 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
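[Editor's note] The MemStoreFlusher entries above show an automatic flush of region 500cafcb91030067761c75e0b496e280 once its memstore crossed the flush threshold, producing the HFile fb5b619a53654d96b6f3e776a9aa6409. The same flush can be forced from a client; the sketch below uses the public Admin API with the table name taken from the log, and assumes a reachable cluster.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ForceFlush {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Flush all memstores of the table used by this test.
                admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
            }
        }
    }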
2024-11-16T08:36:48,696 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35310 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741848_1031] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data6]'}, localName='127.0.0.1:33283', datanodeUuid='7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae', xmitsInProgress=0}:Exception transferring block BP-1154287115-172.17.0.3-1731746185679:blk_1073741848_1031 to mirror 127.0.0.1:34631 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:48,696 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK], DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]) is bad. 2024-11-16T08:36:48,696 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35310 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741848_1031] {}] datanode.BlockReceiver(316): Block 1073741848 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T08:36:48,696 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741848_1031 2024-11-16T08:36:48,697 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35310 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741848_1031] {}] datanode.DataXceiver(331): 127.0.0.1:33283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35310 dst: /127.0.0.1:33283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:48,697 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK] 2024-11-16T08:36:48,699 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:48,700 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK], DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK]) is bad. 2024-11-16T08:36:48,700 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741849_1032 2024-11-16T08:36:48,700 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK] 2024-11-16T08:36:48,703 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43199 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
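[Editor's note] Each "Receiving block ... WRITE_BLOCK" DataXceiver entry corresponds to a client-side output stream pushing packets down a pipeline of datanodes. For reference, a minimal HDFS write with a durability flush looks like the sketch below; the path and payload are placeholders, not values from this test.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SimpleHdfsWrite {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);
            Path p = new Path("/tmp/example-wal-like-file");   // placeholder path
            try (FSDataOutputStream out = fs.create(p, true)) {
                out.write("example edit".getBytes(StandardCharsets.UTF_8));
                // hflush() pushes the buffered data to every datanode in the current pipeline;
                // this is the call that surfaces pipeline failures like the ones logged above.
                out.hflush();
            }
        }
    }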
2024-11-16T08:36:48,703 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35316 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data6]'}, localName='127.0.0.1:33283', datanodeUuid='7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae', xmitsInProgress=0}:Exception transferring block BP-1154287115-172.17.0.3-1731746185679:blk_1073741850_1033 to mirror 127.0.0.1:43199 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:48,703 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK], DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]) is bad. 2024-11-16T08:36:48,703 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741850_1033 2024-11-16T08:36:48,704 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35316 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T08:36:48,704 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35316 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:33283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35316 dst: /127.0.0.1:33283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:48,704 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK] 2024-11-16T08:36:48,706 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:48,706 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK], DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]) is bad. 
2024-11-16T08:36:48,706 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741851_1034 2024-11-16T08:36:48,707 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK] 2024-11-16T08:36:48,707 WARN [IPC Server handler 0 on default port 34591 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T08:36:48,708 WARN [IPC Server handler 0 on default port 34591 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T08:36:48,708 WARN [IPC Server handler 0 on default port 34591 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T08:36:48,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741852_1035 (size=12506) 2024-11-16T08:36:49,115 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/7e3580f6efc14ea492dba6e354f4f696 2024-11-16T08:36:49,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/7e3580f6efc14ea492dba6e354f4f696 as hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/7e3580f6efc14ea492dba6e354f4f696 2024-11-16T08:36:49,134 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/7e3580f6efc14ea492dba6e354f4f696, entries=7, sequenceid=24, filesize=12.2 K 2024-11-16T08:36:49,136 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 500cafcb91030067761c75e0b496e280 in 449ms, sequenceid=24, compaction requested=false 2024-11-16T08:36:49,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
500cafcb91030067761c75e0b496e280: 2024-11-16T08:36:49,136 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-16T08:36:49,136 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:36:49,136 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/7e3580f6efc14ea492dba6e354f4f696 because midkey is the same as first or last row 2024-11-16T08:36:49,637 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:49,969 WARN [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]] 2024-11-16T08:36:49,969 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:49,970 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c27dd56784bd%2C34739%2C1731746188041:(num 1731746205949) roll requested 2024-11-16T08:36:49,970 INFO [regionserver/c27dd56784bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C34739%2C1731746188041.1731746209970 2024-11-16T08:36:49,973 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:49,973 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK], DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]) is bad. 2024-11-16T08:36:49,973 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741853_1036 2024-11-16T08:36:49,974 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK] 2024-11-16T08:36:49,975 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:49,976 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK], DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]) is bad. 2024-11-16T08:36:49,976 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741854_1037 2024-11-16T08:36:49,976 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK] 2024-11-16T08:36:49,978 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:49,978 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK], DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]) is bad. 2024-11-16T08:36:49,978 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741855_1038 2024-11-16T08:36:49,979 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK] 2024-11-16T08:36:49,980 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:49,980 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK], DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK]) is bad. 
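With the other datanodes in the mini-cluster refusing connections, every two-node pipeline the roller tries above pairs the one reachable node (127.0.0.1:33283) with a dead one, so each attempt fails and another node gets excluded; the NameNode warnings that follow ("still in need of 1 to reach 2", "All required storage types are unavailable") are the arithmetic consequence of replication=2 with a single live DISK storage left. A small sketch of that shortfall calculation, with hypothetical names; this is not the BlockPlacementPolicyDefault code.

import java.util.List;
import java.util.Set;

// Simplified illustration of the shortfall reported by
// "Failed to place enough replicas, still in need of 1 to reach 2".
public class PlacementShortfallSketch {

    static int stillNeeded(int requiredReplication, List<String> allDatanodes,
                           Set<String> deadOrExcluded) {
        long usable = allDatanodes.stream()
                .filter(dn -> !deadOrExcluded.contains(dn))
                .count();
        return (int) Math.max(0, requiredReplication - usable);
    }

    public static void main(String[] args) {
        // Addresses taken from the log; only 127.0.0.1:33283 still answers.
        List<String> all = List.of("127.0.0.1:33283", "127.0.0.1:32829",
                "127.0.0.1:34631", "127.0.0.1:37431", "127.0.0.1:43199");
        Set<String> down = Set.of("127.0.0.1:32829", "127.0.0.1:34631",
                "127.0.0.1:37431", "127.0.0.1:43199");
        // replication=2, one live DISK storage -> "still in need of 1 to reach 2"
        System.out.println("still needed = " + stillNeeded(2, all, down));
    }
}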
2024-11-16T08:36:49,980 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741856_1039 2024-11-16T08:36:49,981 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK] 2024-11-16T08:36:49,982 WARN [IPC Server handler 4 on default port 34591 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T08:36:49,982 WARN [IPC Server handler 4 on default port 34591 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T08:36:49,982 WARN [IPC Server handler 4 on default port 34591 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T08:36:49,985 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:49,985 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:49,985 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:49,985 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:49,985 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:49,985 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746205949 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746209970 2024-11-16T08:36:49,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741842_1025 (size=25992) 2024-11-16T08:36:50,000 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38687:38687)] 2024-11-16T08:36:50,000 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746188667 is not closed yet, will try archiving it next time 2024-11-16T08:36:50,000 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746205949 is not closed yet, 
will try archiving it next time 2024-11-16T08:36:50,004 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746201938 to hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/oldWALs/c27dd56784bd%2C34739%2C1731746188041.1731746201938 2024-11-16T08:36:50,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34739 {}] regionserver.HRegion(8855): Flush requested on 500cafcb91030067761c75e0b496e280 2024-11-16T08:36:50,107 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 500cafcb91030067761c75e0b496e280 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T08:36:50,112 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/d3d5c361ee504d428c42028ad7176aea is 1079, key is tmprow/info:/1731746210106/Put/seqid=0 2024-11-16T08:36:50,114 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:50,114 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK], DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]) is bad. 2024-11-16T08:36:50,114 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741858_1041 2024-11-16T08:36:50,115 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK] 2024-11-16T08:36:50,116 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:50,116 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK], DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]) is bad. 2024-11-16T08:36:50,117 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741859_1042 2024-11-16T08:36:50,117 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK] 2024-11-16T08:36:50,120 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37431 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:50,120 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35340 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data6]'}, localName='127.0.0.1:33283', datanodeUuid='7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae', xmitsInProgress=0}:Exception transferring block BP-1154287115-172.17.0.3-1731746185679:blk_1073741860_1043 to mirror 127.0.0.1:37431 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:50,120 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK], DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK]) is bad. 2024-11-16T08:36:50,120 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35340 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T08:36:50,120 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741860_1043 2024-11-16T08:36:50,120 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35340 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:33283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35340 dst: /127.0.0.1:33283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:50,121 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK] 2024-11-16T08:36:50,123 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:50,123 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK], DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]) is bad. 2024-11-16T08:36:50,123 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741861_1044 2024-11-16T08:36:50,124 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK] 2024-11-16T08:36:50,125 WARN [IPC Server handler 2 on default port 34591 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T08:36:50,125 WARN [IPC Server handler 2 on default port 34591 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T08:36:50,125 WARN [IPC Server handler 2 on default port 34591 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T08:36:50,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741862_1045 (size=6027) 2024-11-16T08:36:50,166 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:50,388 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746188667 is not closed yet, will try archiving it next time 2024-11-16T08:36:50,529 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/d3d5c361ee504d428c42028ad7176aea 2024-11-16T08:36:50,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/d3d5c361ee504d428c42028ad7176aea as hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/d3d5c361ee504d428c42028ad7176aea 2024-11-16T08:36:50,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/d3d5c361ee504d428c42028ad7176aea, entries=1, sequenceid=34, filesize=5.9 K 2024-11-16T08:36:50,546 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 500cafcb91030067761c75e0b496e280 in 439ms, sequenceid=34, compaction requested=true 2024-11-16T08:36:50,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 500cafcb91030067761c75e0b496e280: 2024-11-16T08:36:50,546 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-16T08:36:50,546 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:36:50,546 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/7e3580f6efc14ea492dba6e354f4f696 because midkey is the same as first or last row 2024-11-16T08:36:50,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 500cafcb91030067761c75e0b496e280:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T08:36:50,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:36:50,547 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T08:36:50,548 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 
starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T08:36:50,548 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.HStore(1541): 500cafcb91030067761c75e0b496e280/info is initiating minor compaction (all files) 2024-11-16T08:36:50,548 INFO [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 500cafcb91030067761c75e0b496e280/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280. 2024-11-16T08:36:50,549 INFO [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/fb5b619a53654d96b6f3e776a9aa6409, hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/7e3580f6efc14ea492dba6e354f4f696, hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/d3d5c361ee504d428c42028ad7176aea] into tmpdir=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp, totalSize=28.2 K 2024-11-16T08:36:50,549 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] compactions.Compactor(225): Compacting fb5b619a53654d96b6f3e776a9aa6409, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731746203999 2024-11-16T08:36:50,550 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7e3580f6efc14ea492dba6e354f4f696, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731746208053 2024-11-16T08:36:50,550 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] compactions.Compactor(225): Compacting d3d5c361ee504d428c42028ad7176aea, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731746210106 2024-11-16T08:36:50,568 INFO [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 500cafcb91030067761c75e0b496e280#info#compaction#21 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T08:36:50,569 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/002a252b1ce7451aaf1bd36750b57c00 is 1080, key is row0002/info:/1731746203999/Put/seqid=0 2024-11-16T08:36:50,571 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:50,572 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK], DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]) is bad. 2024-11-16T08:36:50,572 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741863_1046 2024-11-16T08:36:50,572 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK] 2024-11-16T08:36:50,588 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5b3545ec[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33283, datanodeUuid=7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae, infoPort=38687, infoSecurePort=0, ipcPort=34149, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679):Failed to transfer BP-1154287115-172.17.0.3-1731746185679:blk_1073741847_1030 to 127.0.0.1:37431 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:50,589 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34631 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T08:36:50,589 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35376 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741864_1047] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data6]'}, localName='127.0.0.1:33283', datanodeUuid='7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae', xmitsInProgress=0}:Exception transferring block BP-1154287115-172.17.0.3-1731746185679:blk_1073741864_1047 to mirror 127.0.0.1:34631 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:50,589 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK], DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]) is bad. 2024-11-16T08:36:50,589 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741864_1047 2024-11-16T08:36:50,589 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35376 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741864_1047] {}] datanode.BlockReceiver(316): Block 1073741864 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T08:36:50,589 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35376 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741864_1047] {}] datanode.DataXceiver(331): 127.0.0.1:33283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35376 dst: /127.0.0.1:33283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:50,590 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK] 2024-11-16T08:36:50,590 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@55f2b0fa[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33283, datanodeUuid=7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae, infoPort=38687, infoSecurePort=0, ipcPort=34149, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679):Failed to transfer BP-1154287115-172.17.0.3-1731746185679:blk_1073741852_1035 to 127.0.0.1:34631 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:50,592 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T08:36:50,592 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK], DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK]) is bad. 2024-11-16T08:36:50,592 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741865_1048 2024-11-16T08:36:50,594 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK] 2024-11-16T08:36:50,605 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43199 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:50,605 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35384 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data6]'}, localName='127.0.0.1:33283', datanodeUuid='7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae', xmitsInProgress=0}:Exception transferring block BP-1154287115-172.17.0.3-1731746185679:blk_1073741866_1049 to mirror 127.0.0.1:43199 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T08:36:50,605 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK], DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]) is bad. 2024-11-16T08:36:50,605 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35384 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T08:36:50,605 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741866_1049 2024-11-16T08:36:50,606 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35384 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:33283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35384 dst: /127.0.0.1:33283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T08:36:50,606 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK] 2024-11-16T08:36:50,607 WARN [IPC Server handler 4 on default port 34591 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T08:36:50,607 WARN [IPC Server handler 4 on default port 34591 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T08:36:50,607 WARN [IPC Server handler 4 on default port 34591 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T08:36:50,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741867_1050 (size=17994) 2024-11-16T08:36:51,023 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/002a252b1ce7451aaf1bd36750b57c00 as hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/002a252b1ce7451aaf1bd36750b57c00 2024-11-16T08:36:51,032 INFO [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 500cafcb91030067761c75e0b496e280/info of 500cafcb91030067761c75e0b496e280 into 002a252b1ce7451aaf1bd36750b57c00(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
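The compaction entries above close the loop on the three flushes: the exploring compaction algorithm selected all three eligible hfiles (28880 bytes, reported as 28.2 K) and the rewrite produced a single 17.6 K file (blk_1073741867_1050, size=17994). A back-of-the-envelope check of that accounting follows; the first file's byte size is inferred from the reported total, and the plain sum below is not the HBase selection policy itself.

import java.util.List;

// Back-of-the-envelope accounting for the minor compaction above;
// a simplification, not HBase's ExploringCompactionPolicy.
public class CompactionAccountingSketch {

    static long totalBytes(List<Long> fileSizes) {
        return fileSizes.stream().mapToLong(Long::longValue).sum();
    }

    public static void main(String[] args) {
        // From the log: 7e3580f6 is 12506 bytes, d3d5c361 is 6027 bytes;
        // fb5b619a (~10.1 K) is inferred as 28880 - 12506 - 6027 = 10347.
        List<Long> inputs = List.of(10_347L, 12_506L, 6_027L);
        long before = totalBytes(inputs);            // 28880 bytes, "totalSize=28.2 K"
        long after = 17_994L;                        // blk_1073741867_1050 (size=17994), the compacted hfile
        System.out.printf("3 files, %d bytes -> 1 file, %d bytes (saved %d)%n",
                before, after, before - after);
    }
}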
2024-11-16T08:36:51,032 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 500cafcb91030067761c75e0b496e280: 2024-11-16T08:36:51,032 INFO [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280., storeName=500cafcb91030067761c75e0b496e280/info, priority=13, startTime=1731746210547; duration=0sec 2024-11-16T08:36:51,032 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-16T08:36:51,032 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:36:51,032 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/002a252b1ce7451aaf1bd36750b57c00 because midkey is the same as first or last row 2024-11-16T08:36:51,033 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-16T08:36:51,033 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:36:51,033 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/002a252b1ce7451aaf1bd36750b57c00 because midkey is the same as first or last row 2024-11-16T08:36:51,033 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-16T08:36:51,033 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:36:51,033 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/002a252b1ce7451aaf1bd36750b57c00 because midkey is the same as first or last row 2024-11-16T08:36:51,033 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:36:51,033 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 500cafcb91030067761c75e0b496e280:info 2024-11-16T08:36:51,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34739 {}] regionserver.HRegion(8855): Flush requested on 500cafcb91030067761c75e0b496e280 2024-11-16T08:36:51,527 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 500cafcb91030067761c75e0b496e280 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T08:36:51,532 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/164101e4b8df4fdaba8f8c396e9b3876 is 1079, key is tmprow/info:/1731746211526/Put/seqid=0 2024-11-16T08:36:51,534 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:51,534 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK], DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]) is bad. 2024-11-16T08:36:51,534 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741868_1051 2024-11-16T08:36:51,535 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK] 2024-11-16T08:36:51,538 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34631 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T08:36:51,538 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35406 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741869_1052] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data6]'}, localName='127.0.0.1:33283', datanodeUuid='7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae', xmitsInProgress=0}:Exception transferring block BP-1154287115-172.17.0.3-1731746185679:blk_1073741869_1052 to mirror 127.0.0.1:34631 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:51,538 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK], DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]) is bad. 2024-11-16T08:36:51,538 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35406 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741869_1052] {}] datanode.BlockReceiver(316): Block 1073741869 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T08:36:51,538 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741869_1052 2024-11-16T08:36:51,538 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35406 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741869_1052] {}] datanode.DataXceiver(331): 127.0.0.1:33283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35406 dst: /127.0.0.1:33283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:51,539 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK] 2024-11-16T08:36:51,541 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:51,541 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK], DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK]) is bad. 2024-11-16T08:36:51,541 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741870_1053 2024-11-16T08:36:51,542 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK] 2024-11-16T08:36:51,544 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32829 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T08:36:51,544 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35422 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data6]'}, localName='127.0.0.1:33283', datanodeUuid='7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae', xmitsInProgress=0}:Exception transferring block BP-1154287115-172.17.0.3-1731746185679:blk_1073741871_1054 to mirror 127.0.0.1:32829 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:51,545 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK], DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]) is bad. 2024-11-16T08:36:51,545 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741871_1054 2024-11-16T08:36:51,545 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35422 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T08:36:51,545 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:35422 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:33283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35422 dst: /127.0.0.1:33283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:51,545 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK] 2024-11-16T08:36:51,546 WARN [IPC Server handler 1 on default port 34591 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T08:36:51,546 WARN [IPC Server handler 1 on default port 34591 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T08:36:51,546 WARN [IPC Server handler 1 on default port 34591 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T08:36:51,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741872_1055 (size=6027) 2024-11-16T08:36:51,577 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@55f2b0fa[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33283, datanodeUuid=7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae, infoPort=38687, infoSecurePort=0, ipcPort=34149, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679):Failed to transfer BP-1154287115-172.17.0.3-1731746185679:blk_1073741842_1025 to 127.0.0.1:32829 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:51,577 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5b3545ec[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33283, datanodeUuid=7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae, infoPort=38687, infoSecurePort=0, ipcPort=34149, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679):Failed to transfer BP-1154287115-172.17.0.3-1731746185679:blk_1073741862_1045 to 127.0.0.1:37431 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:51,638 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T08:36:51,950 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/164101e4b8df4fdaba8f8c396e9b3876 2024-11-16T08:36:51,958 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/164101e4b8df4fdaba8f8c396e9b3876 as hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/164101e4b8df4fdaba8f8c396e9b3876 2024-11-16T08:36:51,965 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/164101e4b8df4fdaba8f8c396e9b3876, entries=1, sequenceid=45, filesize=5.9 K 2024-11-16T08:36:51,967 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 500cafcb91030067761c75e0b496e280 in 439ms, sequenceid=45, compaction requested=false 2024-11-16T08:36:51,967 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 500cafcb91030067761c75e0b496e280: 2024-11-16T08:36:51,967 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-16T08:36:51,967 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:36:51,967 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/002a252b1ce7451aaf1bd36750b57c00 because midkey is the same as first or last row 2024-11-16T08:36:52,004 WARN [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]] 2024-11-16T08:36:52,004 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T08:36:52,004 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c27dd56784bd%2C34739%2C1731746188041:(num 1731746209970) roll requested 2024-11-16T08:36:52,004 INFO [regionserver/c27dd56784bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C34739%2C1731746188041.1731746212004 2024-11-16T08:36:52,007 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:52,008 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK], DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK]) is bad. 2024-11-16T08:36:52,008 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741873_1056 2024-11-16T08:36:52,008 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK] 2024-11-16T08:36:52,010 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T08:36:52,010 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK], DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]) is bad. 2024-11-16T08:36:52,010 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741874_1057 2024-11-16T08:36:52,011 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK] 2024-11-16T08:36:52,012 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:52,013 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK], DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]) is bad. 2024-11-16T08:36:52,013 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741875_1058 2024-11-16T08:36:52,013 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK] 2024-11-16T08:36:52,014 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:52,015 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK], DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]) is bad. 2024-11-16T08:36:52,015 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741876_1059 2024-11-16T08:36:52,015 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK] 2024-11-16T08:36:52,016 WARN [IPC Server handler 1 on default port 34591 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T08:36:52,016 WARN [IPC Server handler 1 on default port 34591 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T08:36:52,016 WARN [IPC Server handler 1 on default port 34591 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T08:36:52,019 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:52,019 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:52,019 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:52,019 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:52,019 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:52,019 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746209970 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746212004 2024-11-16T08:36:52,020 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38687:38687)] 2024-11-16T08:36:52,020 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746188667 is not closed yet, will try archiving it next time 2024-11-16T08:36:52,020 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746209970 is not closed yet, will try archiving it next time 2024-11-16T08:36:52,020 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746205949 to hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/oldWALs/c27dd56784bd%2C34739%2C1731746188041.1731746205949 2024-11-16T08:36:52,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741857_1040 (size=13591) 2024-11-16T08:36:52,166 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:52,422 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746188667 is not closed yet, will try archiving it next time 2024-11-16T08:36:52,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34739 {}] regionserver.HRegion(8855): Flush requested on 500cafcb91030067761c75e0b496e280 2024-11-16T08:36:52,948 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 500cafcb91030067761c75e0b496e280 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T08:36:52,952 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/c38d3c5ad4504b9bbad7cf563bc1dbe5 is 1079, key is tmprow/info:/1731746212947/Put/seqid=0 2024-11-16T08:36:52,954 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:52,954 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK], DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]) is bad. 2024-11-16T08:36:52,954 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741878_1061 2024-11-16T08:36:52,955 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK] 2024-11-16T08:36:52,957 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32829 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:52,957 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:44812 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data6]'}, localName='127.0.0.1:33283', datanodeUuid='7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae', xmitsInProgress=0}:Exception transferring block BP-1154287115-172.17.0.3-1731746185679:blk_1073741879_1062 to mirror 127.0.0.1:32829 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:52,958 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK], DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]) is bad. 2024-11-16T08:36:52,958 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741879_1062 2024-11-16T08:36:52,958 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:44812 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-16T08:36:52,958 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779196900_22 at /127.0.0.1:44812 [Receiving block BP-1154287115-172.17.0.3-1731746185679:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:33283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44812 dst: /127.0.0.1:33283 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:52,958 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK] 2024-11-16T08:36:52,960 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:52,960 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK], DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK]) is bad. 2024-11-16T08:36:52,960 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741880_1063 2024-11-16T08:36:52,961 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK] 2024-11-16T08:36:52,963 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:52,963 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK], DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]) is bad. 
2024-11-16T08:36:52,963 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741881_1064 2024-11-16T08:36:52,964 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK] 2024-11-16T08:36:52,965 WARN [IPC Server handler 1 on default port 34591 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T08:36:52,965 WARN [IPC Server handler 1 on default port 34591 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T08:36:52,965 WARN [IPC Server handler 1 on default port 34591 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T08:36:52,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741882_1065 (size=6027) 2024-11-16T08:36:52,969 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/c38d3c5ad4504b9bbad7cf563bc1dbe5 2024-11-16T08:36:52,977 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/c38d3c5ad4504b9bbad7cf563bc1dbe5 as hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/c38d3c5ad4504b9bbad7cf563bc1dbe5 2024-11-16T08:36:52,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/c38d3c5ad4504b9bbad7cf563bc1dbe5, entries=1, sequenceid=55, filesize=5.9 K 2024-11-16T08:36:52,986 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 500cafcb91030067761c75e0b496e280 in 38ms, sequenceid=55, compaction requested=true 2024-11-16T08:36:52,986 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
500cafcb91030067761c75e0b496e280: 2024-11-16T08:36:52,986 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-16T08:36:52,987 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:36:52,987 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/002a252b1ce7451aaf1bd36750b57c00 because midkey is the same as first or last row 2024-11-16T08:36:52,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 500cafcb91030067761c75e0b496e280:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T08:36:52,987 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T08:36:52,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:36:52,988 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T08:36:52,989 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.HStore(1541): 500cafcb91030067761c75e0b496e280/info is initiating minor compaction (all files) 2024-11-16T08:36:52,989 INFO [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 500cafcb91030067761c75e0b496e280/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280. 
2024-11-16T08:36:52,989 INFO [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/002a252b1ce7451aaf1bd36750b57c00, hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/164101e4b8df4fdaba8f8c396e9b3876, hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/c38d3c5ad4504b9bbad7cf563bc1dbe5] into tmpdir=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp, totalSize=29.3 K 2024-11-16T08:36:52,989 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] compactions.Compactor(225): Compacting 002a252b1ce7451aaf1bd36750b57c00, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731746203999 2024-11-16T08:36:52,990 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] compactions.Compactor(225): Compacting 164101e4b8df4fdaba8f8c396e9b3876, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731746211526 2024-11-16T08:36:52,990 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] compactions.Compactor(225): Compacting c38d3c5ad4504b9bbad7cf563bc1dbe5, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731746212947 2024-11-16T08:36:53,008 INFO [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 500cafcb91030067761c75e0b496e280#info#compaction#24 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T08:36:53,008 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/8841f997fe0a490682e06d5834247126 is 1080, key is row0002/info:/1731746203999/Put/seqid=0 2024-11-16T08:36:53,010 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T08:36:53,011 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK], DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK]) is bad. 2024-11-16T08:36:53,011 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741883_1066 2024-11-16T08:36:53,011 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK] 2024-11-16T08:36:53,013 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:53,013 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK], DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]) is bad. 2024-11-16T08:36:53,014 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741884_1067 2024-11-16T08:36:53,014 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34631,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK] 2024-11-16T08:36:53,016 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:53,017 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK], DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]) is bad. 2024-11-16T08:36:53,017 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741885_1068 2024-11-16T08:36:53,018 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK] 2024-11-16T08:36:53,019 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:53,020 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK], DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]) is bad. 
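[Editor's note] The repeated "Exception in createBlockOutputStream ... Abandoning ... Excluding datanode" sequence above is the DFS client giving up on a block whose write pipeline cannot be opened and asking for a new one while remembering which datanodes already failed. A much-simplified sketch of that retry loop, under the assumption of hypothetical helpers (BlockAllocator.allocateBlock, PipelineConnector.openPipeline) rather than the real DataStreamer API:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    public class PipelineRetrySketch {
        // Hypothetical allocator: returns a pipeline of datanode addresses for a new block,
        // avoiding the nodes we already know are unreachable.
        interface BlockAllocator {
            List<String> allocateBlock(List<String> excludedNodes) throws IOException;
        }

        // Hypothetical connector: throws if the first datanode in the pipeline refuses connections.
        interface PipelineConnector {
            void openPipeline(List<String> pipeline) throws IOException;
        }

        static List<String> setupWithExclusion(BlockAllocator allocator, PipelineConnector connector,
                                               int maxAttempts) throws IOException {
            List<String> excluded = new ArrayList<>();
            for (int attempt = 0; attempt < maxAttempts; attempt++) {
                List<String> pipeline = allocator.allocateBlock(excluded);
                try {
                    connector.openPipeline(pipeline);
                    return pipeline;                 // pipeline established
                } catch (IOException e) {
                    excluded.add(pipeline.get(0));   // abandon the block, exclude the bad datanode
                }
            }
            throw new IOException("could not build a pipeline, excluded=" + excluded);
        }

        public static void main(String[] args) throws IOException {
            List<List<String>> plans = List.of(
                List.of("dn1:9866", "dn4:9866"),   // dn1 refuses connections
                List.of("dn2:9866", "dn4:9866"),   // dn2 refuses connections
                List.of("dn3:9866", "dn4:9866"));  // works
            int[] call = {0};
            BlockAllocator alloc = excludedNodes -> plans.get(call[0]++);
            PipelineConnector conn = p -> {
                if (!p.get(0).equals("dn3:9866")) throw new IOException("Connection refused: " + p.get(0));
            };
            System.out.println(setupWithExclusion(alloc, conn, 5)); // [dn3:9866, dn4:9866]
        }
    }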
2024-11-16T08:36:53,020 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741886_1069 2024-11-16T08:36:53,020 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK] 2024-11-16T08:36:53,021 WARN [IPC Server handler 2 on default port 34591 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-16T08:36:53,021 WARN [IPC Server handler 2 on default port 34591 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-16T08:36:53,022 WARN [IPC Server handler 2 on default port 34591 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-16T08:36:53,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741887_1070 (size=18097) 2024-11-16T08:36:53,434 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/8841f997fe0a490682e06d5834247126 as hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/8841f997fe0a490682e06d5834247126 2024-11-16T08:36:53,442 INFO [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 500cafcb91030067761c75e0b496e280/info of 500cafcb91030067761c75e0b496e280 into 8841f997fe0a490682e06d5834247126(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
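[Editor's note] The "Committing .tmp/info/... as .../info/..." entry above reflects the usual pattern of writing the compacted file into a temporary directory and only moving it into the store directory once complete, so readers never observe a half-written file. A minimal sketch of that move using the Hadoop FileSystem API; the paths in main are placeholders, and the real HBase code derives them from the region and store layout:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    import java.io.IOException;

    public class CommitCompactedFileSketch {
        // Move a compacted file from the region's .tmp directory into the store directory.
        static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
            Path target = new Path(storeDir, tmpFile.getName());
            if (!fs.rename(tmpFile, target)) {       // atomic rename within a single HDFS namespace
                throw new IOException("Failed to commit " + tmpFile + " to " + target);
            }
            return target;
        }

        public static void main(String[] args) throws IOException {
            FileSystem fs = FileSystem.get(new Configuration()); // local FS unless HDFS is configured
            Path tmp = new Path("/tmp/region/.tmp/info/example-hfile");
            Path store = new Path("/tmp/region/info");
            fs.mkdirs(store);
            fs.mkdirs(tmp.getParent());
            fs.create(tmp, true).close();            // stand-in for the compacted HFile
            System.out.println("committed to " + commit(fs, tmp, store));
        }
    }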
2024-11-16T08:36:53,442 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 500cafcb91030067761c75e0b496e280: 2024-11-16T08:36:53,442 INFO [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280., storeName=500cafcb91030067761c75e0b496e280/info, priority=13, startTime=1731746212987; duration=0sec 2024-11-16T08:36:53,442 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-16T08:36:53,442 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:36:53,442 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/8841f997fe0a490682e06d5834247126 because midkey is the same as first or last row 2024-11-16T08:36:53,442 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-16T08:36:53,442 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:36:53,442 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/8841f997fe0a490682e06d5834247126 because midkey is the same as first or last row 2024-11-16T08:36:53,443 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-16T08:36:53,443 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:36:53,443 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/8841f997fe0a490682e06d5834247126 because midkey is the same as first or last row 2024-11-16T08:36:53,443 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:36:53,443 DEBUG [RS:0;c27dd56784bd:34739-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 500cafcb91030067761c75e0b496e280:info 2024-11-16T08:36:53,577 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5b3545ec[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33283, datanodeUuid=7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae, infoPort=38687, infoSecurePort=0, ipcPort=34149, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679):Failed to transfer 
BP-1154287115-172.17.0.3-1731746185679:blk_1073741872_1055 to 127.0.0.1:32829 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:53,577 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@55f2b0fa[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33283, datanodeUuid=7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae, infoPort=38687, infoSecurePort=0, ipcPort=34149, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679):Failed to transfer BP-1154287115-172.17.0.3-1731746185679:blk_1073741867_1050 to 127.0.0.1:34631 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:53,638 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:54,021 WARN [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-16T08:36:54,021 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. 
Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:54,166 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:54,172 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:36:54,177 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:36:54,178 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:36:54,178 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:36:54,178 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T08:36:54,179 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c8ca2dc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:36:54,179 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41a1c5f7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:36:54,293 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@26e43435{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/java.io.tmpdir/jetty-localhost-36401-hadoop-hdfs-3_4_1-tests_jar-_-any-15496044658333496083/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:36:54,293 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@34e71a9{HTTP/1.1, 
(http/1.1)}{localhost:36401} 2024-11-16T08:36:54,293 INFO [Time-limited test {}] server.Server(415): Started @132529ms 2024-11-16T08:36:54,295 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:36:54,578 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@55f2b0fa[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33283, datanodeUuid=7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae, infoPort=38687, infoSecurePort=0, ipcPort=34149, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679):Failed to transfer BP-1154287115-172.17.0.3-1731746185679:blk_1073741857_1040 to 127.0.0.1:43199 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:54,578 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5b3545ec[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33283, datanodeUuid=7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae, infoPort=38687, infoSecurePort=0, ipcPort=34149, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679):Failed to transfer BP-1154287115-172.17.0.3-1731746185679:blk_1073741882_1065 to 127.0.0.1:37431 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:54,717 WARN [Thread-980 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T08:36:54,726 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5e53bc5fc4b7aa3c with lease ID 0xfda6324f07e9c7a2: from storage DS-af74058c-b8da-46d5-95f6-9e0ec577f137 node DatanodeRegistration(127.0.0.1:43207, datanodeUuid=37c37cb5-bfce-4659-be37-7fdcb648c3f2, infoPort=44551, infoSecurePort=0, ipcPort=36475, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:36:54,726 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5e53bc5fc4b7aa3c with lease ID 0xfda6324f07e9c7a2: from storage DS-271cabbe-032c-48da-a1ca-f357b40a914c node DatanodeRegistration(127.0.0.1:43207, datanodeUuid=37c37cb5-bfce-4659-be37-7fdcb648c3f2, infoPort=44551, infoSecurePort=0, ipcPort=36475, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:36:55,639 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:56,021 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:56,167 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
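[Editor's note] The repeated "All datanodes ... are bad" errors, together with the earlier "Too many consecutive RollWriter requests" warning, describe a WAL whose output pipeline has fewer live replicas than required, so the roller keeps being asked for new files until a healthy pipeline is possible. A hedged sketch of that kind of check; onSync, countReplicas-style inputs, and the thresholds are illustrative, not the FSHLog internals:

    public class LowReplicationRollSketch {
        private final int minTolerableReplicas;
        private final int maxConsecutiveRolls;
        private int consecutiveRolls = 0;

        LowReplicationRollSketch(int minTolerableReplicas, int maxConsecutiveRolls) {
            this.minTolerableReplicas = minTolerableReplicas;
            this.maxConsecutiveRolls = maxConsecutiveRolls;
        }

        // Called after each sync; decides whether a log roll should be requested.
        boolean onSync(int currentPipelineReplicas) {
            if (currentPipelineReplicas >= minTolerableReplicas) {
                consecutiveRolls = 0;        // healthy pipeline, reset the counter
                return false;
            }
            if (consecutiveRolls >= maxConsecutiveRolls) {
                // mirrors the log: rolling again will not help if the cluster itself
                // has fewer live datanodes than the tolerable replica count
                System.out.println("Too many consecutive RollWriter requests; "
                    + "live datanodes likely below tolerable replicas");
                return false;
            }
            consecutiveRolls++;
            return true;                     // ask the roller for a new WAL file
        }

        public static void main(String[] args) {
            LowReplicationRollSketch roller = new LowReplicationRollSketch(2, 3);
            for (int i = 0; i < 5; i++) {
                System.out.println("roll requested: " + roller.onSync(1)); // only 1 live replica
            }
        }
    }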
2024-11-16T08:36:56,577 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5b3545ec[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33283, datanodeUuid=7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae, infoPort=38687, infoSecurePort=0, ipcPort=34149, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679):Failed to transfer BP-1154287115-172.17.0.3-1731746185679:blk_1073741887_1070 to 127.0.0.1:32829 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:36:57,639 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:57,852 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T08:36:58,021 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:58,167 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:58,409 ERROR [FSHLog-0-hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData-prefix:c27dd56784bd,44433,1731746187873 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:58,410 WARN [FSHLog-0-hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData-prefix:c27dd56784bd,44433,1731746187873 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:58,410 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog c27dd56784bd%2C44433%2C1731746187873:(num 1731746188187) roll requested 2024-11-16T08:36:58,410 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C44433%2C1731746187873.1731746218410 2024-11-16T08:36:58,413 WARN [Thread-1000 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:58,414 WARN [Thread-1000 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK], DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]) is bad. 2024-11-16T08:36:58,414 WARN [Thread-1000 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741888_1071 2024-11-16T08:36:58,414 WARN [Thread-1000 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK] 2024-11-16T08:36:58,416 WARN [Thread-1000 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:58,416 WARN [Thread-1000 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK], DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK]) is bad. 
2024-11-16T08:36:58,416 WARN [Thread-1000 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741889_1072 2024-11-16T08:36:58,417 WARN [Thread-1000 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37431,DS-7146477d-586c-4433-a598-53ddf8328319,DISK] 2024-11-16T08:36:58,422 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:58,422 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:58,422 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:58,422 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:58,422 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:36:58,422 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/WALs/c27dd56784bd,44433,1731746187873/c27dd56784bd%2C44433%2C1731746187873.1731746188187 with entries=54, filesize=26.65 KB; new WAL /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/WALs/c27dd56784bd,44433,1731746187873/c27dd56784bd%2C44433%2C1731746187873.1731746218410 2024-11-16T08:36:58,423 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:58,423 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:36:58,423 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/WALs/c27dd56784bd,44433,1731746187873/c27dd56784bd%2C44433%2C1731746187873.1731746188187 2024-11-16T08:36:58,424 WARN [IPC Server handler 1 on default port 34591 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/WALs/c27dd56784bd,44433,1731746187873/c27dd56784bd%2C44433%2C1731746187873.1731746188187 has not been closed. Lease recovery is in progress. 
RecoveryId = 1074 for block blk_1073741830_1006 2024-11-16T08:36:58,424 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/WALs/c27dd56784bd,44433,1731746187873/c27dd56784bd%2C44433%2C1731746187873.1731746188187 after 1ms 2024-11-16T08:36:58,424 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38687:38687),(127.0.0.1/127.0.0.1:44551:44551)] 2024-11-16T08:36:58,424 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/WALs/c27dd56784bd,44433,1731746187873/c27dd56784bd%2C44433%2C1731746187873.1731746188187 is not closed yet, will try archiving it next time 2024-11-16T08:36:59,640 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:00,022 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:01,640 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
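[Editor's note] The "Recover lease on dfs file ... Failed to recover lease, attempt=0 ... attempt=1 after 4002ms" entries above show the old WAL being reclaimed: the file was never closed, so the client asks the NameNode to recover its lease and polls until the file is closed and readable. A simplified sketch of that retry loop; it needs a live DistributedFileSystem handle, so no main is included, and the pause handling in the real RecoverLeaseFSUtils is more elaborate:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    import java.io.IOException;

    public class LeaseRecoverySketch {
        // Poll DistributedFileSystem.recoverLease until it reports the file is closed,
        // pausing between attempts; maxAttempts and pauseMs are illustrative values.
        static boolean recoverLease(DistributedFileSystem dfs, Path walFile,
                                    int maxAttempts, long pauseMs)
                throws IOException, InterruptedException {
            for (int attempt = 0; attempt < maxAttempts; attempt++) {
                if (dfs.recoverLease(walFile)) {
                    return true;             // lease recovered, file is now closed
                }
                // corresponds to "Failed to recover lease, attempt=N" in the log
                Thread.sleep(pauseMs);
            }
            return false;
        }
    }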
2024-11-16T08:37:02,022 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:02,425 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/WALs/c27dd56784bd,44433,1731746187873/c27dd56784bd%2C44433%2C1731746187873.1731746188187 after 4002ms 2024-11-16T08:37:03,640 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:04,023 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:04,745 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7a2dd050 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1154287115-172.17.0.3-1731746185679:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:43199,null,null]) java.net.ConnectException: Call From c27dd56784bd/172.17.0.3 to localhost:37695 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-16T08:37:04,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43207 is added to blk_1073741833_1019 (size=455) 2024-11-16T08:37:04,969 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746188667 to hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/oldWALs/c27dd56784bd%2C34739%2C1731746188041.1731746188667 2024-11-16T08:37:04,970 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746209970 to hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/oldWALs/c27dd56784bd%2C34739%2C1731746188041.1731746209970 2024-11-16T08:37:05,641 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:05,723 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4b1de8ab[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43207, datanodeUuid=37c37cb5-bfce-4659-be37-7fdcb648c3f2, infoPort=44551, infoSecurePort=0, ipcPort=36475, storageInfo=lv=-57;cid=testClusterID;nsid=313643864;c=1731746185679):Failed to transfer BP-1154287115-172.17.0.3-1731746185679:blk_1073741833_1019 to 127.0.0.1:32829 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:37:06,023 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:07,642 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:07,871 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C34739%2C1731746188041.1731746227871 2024-11-16T08:37:07,885 WARN [Thread-1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:07,885 WARN [Thread-1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK], DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]) is bad. 
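[Editor's note] The earlier WAL-Archive entries show rolled WAL files being moved out of the region server's WALs directory into the shared oldWALs directory once they are no longer needed. A minimal sketch of that move; the directory layout mirrors the log, but the selection rule is simplified, since the real archiver only moves WALs whose edits are already persisted:

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    import java.io.IOException;

    public class ArchiveOldWalsSketch {
        // Move every rolled WAL file except the one currently being written into oldWALs.
        static void archiveOldWals(FileSystem fs, Path walsDir, Path oldWalsDir, Path currentWal)
                throws IOException {
            fs.mkdirs(oldWalsDir);
            for (FileStatus status : fs.listStatus(walsDir)) {
                Path wal = status.getPath();
                if (status.isFile() && !wal.equals(currentWal)) {
                    fs.rename(wal, new Path(oldWalsDir, wal.getName()));
                }
            }
        }
    }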
2024-11-16T08:37:07,885 WARN [Thread-1010 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741891_1075 2024-11-16T08:37:07,891 WARN [Thread-1010 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK] 2024-11-16T08:37:07,956 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:07,956 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:07,959 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:07,960 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:07,960 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:07,961 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746212004 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.1731746227871 2024-11-16T08:37:07,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741877_1060 (size=12911) 2024-11-16T08:37:07,972 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44551:44551),(127.0.0.1/127.0.0.1:38687:38687)] 2024-11-16T08:37:08,023 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-16T08:37:08,024 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:08,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34739 {}] regionserver.HRegion(8855): Flush requested on 500cafcb91030067761c75e0b496e280 2024-11-16T08:37:08,026 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 500cafcb91030067761c75e0b496e280 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-16T08:37:08,082 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/fa0725be387a4d4695b3523118f750b7 is 1080, key is row0013/info:/1731746227980/Put/seqid=0 2024-11-16T08:37:08,132 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T08:37:08,132 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
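[Editor's note] The "Flush requested ... Flushing 500cafcb... 1/1 column families, dataSize=7.35 KB" entries above are the memstore flusher reacting to a region whose in-memory write buffer has grown past the (deliberately tiny, for this test) flush size. A minimal illustrative trigger check; the names and threshold are placeholders, not the HRegion/MemStoreFlusher internals:

    public class MemStoreFlushSketch {
        // Request a flush once the region's in-memory data size reaches the flush threshold.
        static boolean shouldFlush(long memstoreDataSizeBytes, long flushSizeBytes) {
            return memstoreDataSizeBytes >= flushSizeBytes;
        }

        public static void main(String[] args) {
            long dataSize = 7 * 1024 + 358;  // roughly 7.35 KB, as in the log
            long flushSize = 4 * 1024;       // small illustrative threshold used by the test setup
            System.out.println("flush requested: " + shouldFlush(dataSize, flushSize)); // true
        }
    }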
2024-11-16T08:37:08,132 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:37:08,132 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:37:08,132 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:37:08,132 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T08:37:08,132 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1643275885, stopped=false 2024-11-16T08:37:08,133 
INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c27dd56784bd,44433,1731746187873 2024-11-16T08:37:08,133 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T08:37:08,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741893_1077 (size=8190) 2024-11-16T08:37:08,143 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/fa0725be387a4d4695b3523118f750b7 2024-11-16T08:37:08,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43207 is added to blk_1073741893_1077 (size=8190) 2024-11-16T08:37:08,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/fa0725be387a4d4695b3523118f750b7 as hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/fa0725be387a4d4695b3523118f750b7 2024-11-16T08:37:08,196 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T08:37:08,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T08:37:08,196 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:08,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:08,196 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T08:37:08,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44603-0x10142c9c5fb0002, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T08:37:08,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44603-0x10142c9c5fb0002, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:08,197 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
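The "Committing .../.tmp/info/fa0725be... as .../info/fa0725be..." entry above is the standard flush commit: the new HFile is written under the region's .tmp directory and only renamed into the store directory once it is complete, so readers never see a partial file. A minimal sketch of that write-then-rename pattern with the plain FileSystem API, using made-up paths; this is not the HRegionFileSystem code itself.

    // Sketch: write a file under .tmp, then move it into the store directory.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenRenameSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path tmp  = new Path("/data/default/t1/region1/.tmp/info/newfile"); // hypothetical paths
        Path dest = new Path("/data/default/t1/region1/info/newfile");

        try (FSDataOutputStream out = fs.create(tmp)) {
          out.writeBytes("flushed cells would go here");                    // placeholder payload
        }
        // Readers only ever see complete files in the store directory,
        // because the half-written file lived under .tmp until now.
        if (!fs.rename(tmp, dest)) {
          throw new java.io.IOException("commit of " + tmp + " failed");
        }
      }
    }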
2024-11-16T08:37:08,197 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:37:08,198 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:37:08,198 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'c27dd56784bd,34739,1731746188041' ***** 2024-11-16T08:37:08,198 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T08:37:08,198 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c27dd56784bd,44603,1731746189510' ***** 2024-11-16T08:37:08,198 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T08:37:08,199 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44603-0x10142c9c5fb0002, quorum=127.0.0.1:54424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:37:08,199 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:37:08,199 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:37:08,208 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T08:37:08,208 INFO [RS:1;c27dd56784bd:44603 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T08:37:08,209 INFO [RS:1;c27dd56784bd:44603 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T08:37:08,209 INFO [RS:1;c27dd56784bd:44603 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T08:37:08,209 INFO [RS:1;c27dd56784bd:44603 {}] regionserver.HRegionServer(959): stopping server c27dd56784bd,44603,1731746189510 2024-11-16T08:37:08,209 INFO [RS:1;c27dd56784bd:44603 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T08:37:08,209 INFO [RS:1;c27dd56784bd:44603 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;c27dd56784bd:44603. 
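The ZooKeeper traffic above is the shutdown signal: the master deletes /hbase/running, every watcher receives NodeDeleted, and each server re-sets its watch on the now-absent znode ("Set watcher on znode that does not yet exist"). A rough sketch of that watch-and-re-arm pattern with the stock ZooKeeper client, assuming the quorum string from this log and an arbitrary session timeout; it is not the ZKWatcher implementation.

    // Sketch: watch the cluster "running" marker znode; its deletion means shutdown.
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:54424", 30_000, event -> { });
        Watcher shutdownWatcher = new Watcher() {
          @Override public void process(WatchedEvent event) {
            if (event.getType() == Event.EventType.NodeDeleted
                && "/hbase/running".equals(event.getPath())) {
              System.out.println("cluster shutdown requested"); // a real server would start stopping here
            }
            try {
              zk.exists("/hbase/running", this);                // re-arm: ZooKeeper watches are one-shot
            } catch (Exception ignored) { }
          }
        };
        // exists() also sets the watch when the znode is absent, which is what
        // "Set watcher on znode that does not yet exist" refers to.
        zk.exists("/hbase/running", shutdownWatcher);
        Thread.sleep(60_000);
        zk.close();
      }
    }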
2024-11-16T08:37:08,209 DEBUG [RS:1;c27dd56784bd:44603 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:37:08,209 DEBUG [RS:1;c27dd56784bd:44603 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:37:08,209 INFO [RS:1;c27dd56784bd:44603 {}] regionserver.HRegionServer(976): stopping server c27dd56784bd,44603,1731746189510; all regions closed. 2024-11-16T08:37:08,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/fa0725be387a4d4695b3523118f750b7, entries=3, sequenceid=66, filesize=8.0 K 2024-11-16T08:37:08,212 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T08:37:08,220 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:08,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8608 for 500cafcb91030067761c75e0b496e280 in 195ms, sequenceid=66, compaction requested=false 2024-11-16T08:37:08,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 500cafcb91030067761c75e0b496e280: 2024-11-16T08:37:08,222 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-16T08:37:08,222 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:37:08,222 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/8841f997fe0a490682e06d5834247126 because midkey is the same as first or last row 2024-11-16T08:37:08,222 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T08:37:08,222 INFO [RS:0;c27dd56784bd:34739 {}] flush.RegionServerFlushTableProcedureManager(119): 
Stopping region server flush procedure manager gracefully. 2024-11-16T08:37:08,222 INFO [RS:0;c27dd56784bd:34739 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T08:37:08,223 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer(3091): Received CLOSE for 500cafcb91030067761c75e0b496e280 2024-11-16T08:37:08,223 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer(959): stopping server c27dd56784bd,34739,1731746188041 2024-11-16T08:37:08,223 INFO [RS:0;c27dd56784bd:34739 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T08:37:08,223 INFO [RS:0;c27dd56784bd:34739 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c27dd56784bd:34739. 2024-11-16T08:37:08,223 DEBUG [RS:0;c27dd56784bd:34739 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:37:08,223 DEBUG [RS:0;c27dd56784bd:34739 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:37:08,224 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T08:37:08,224 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T08:37:08,224 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T08:37:08,224 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:08,224 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T08:37:08,224 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:08,227 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 500cafcb91030067761c75e0b496e280, disabling compactions & flushes 2024-11-16T08:37:08,227 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280. 
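The split-policy lines a little above (sumSize=25.7 K vs sizeToCheck=16.0 K, then "cannot split ... because midkey is the same as first or last row") record a two-step decision: the size check passes, but the candidate split key is unusable, so no split happens. A compressed sketch of that decision; the helper names are invented for illustration and sizeToCheck is taken as an input rather than derived from the policy's own formula.

    // Sketch of the split decision the log records: size check first, then a
    // sanity check that the chosen split key would really split something.
    import java.util.Arrays;
    import java.util.Optional;

    public final class SplitDecisionSketch {
      // sizeToCheck comes from the configured split policy (16 KB in this test's setup).
      static boolean shouldSplit(long sumOfStoreFileSizes, long sizeToCheck) {
        return sumOfStoreFileSizes > sizeToCheck;
      }

      // A usable split point must differ from both the first and last row, otherwise
      // one daughter region would be empty ("midkey is the same as first or last row").
      static Optional<byte[]> splitPoint(byte[] midKey, byte[] firstRow, byte[] lastRow) {
        if (Arrays.equals(midKey, firstRow) || Arrays.equals(midKey, lastRow)) {
          return Optional.empty();
        }
        return Optional.of(midKey);
      }

      public static void main(String[] args) {
        // Roughly the numbers from the log above: 25.7 K vs 16.0 K -> size says split...
        System.out.println(shouldSplit(26_317L, 16_384L));               // true
        // ...but an unusable midkey vetoes it.
        byte[] row = "row0013".getBytes();
        System.out.println(splitPoint(row, row, "row0023".getBytes()));  // Optional.empty
      }
    }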
2024-11-16T08:37:08,227 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280. 2024-11-16T08:37:08,227 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280. after waiting 0 ms 2024-11-16T08:37:08,227 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280. 2024-11-16T08:37:08,228 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 500cafcb91030067761c75e0b496e280 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-16T08:37:08,228 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:08,228 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T08:37:08,228 DEBUG [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 500cafcb91030067761c75e0b496e280=TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280.} 2024-11-16T08:37:08,228 DEBUG [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 500cafcb91030067761c75e0b496e280 2024-11-16T08:37:08,231 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:08,232 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T08:37:08,232 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T08:37:08,232 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T08:37:08,232 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T08:37:08,232 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T08:37:08,233 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-16T08:37:08,233 ERROR [FSHLog-0-hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58-prefix:c27dd56784bd,34739,1731746188041.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:08,233 WARN [FSHLog-0-hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58-prefix:c27dd56784bd,34739,1731746188041.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:08,233 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c27dd56784bd%2C34739%2C1731746188041.meta:.meta(num 1731746189235) roll requested 2024-11-16T08:37:08,234 INFO [regionserver/c27dd56784bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C34739%2C1731746188041.meta.1731746228234.meta 2024-11-16T08:37:08,245 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:08,246 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T08:37:08,246 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 2024-11-16T08:37:08,250 WARN [IPC Server handler 0 on default port 34591 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 has not been closed. Lease recovery is in progress. RecoveryId = 1079 for block blk_1073741837_1013 2024-11-16T08:37:08,252 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 after 6ms 2024-11-16T08:37:08,260 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/326a389d53d648999fb63c2d791d305c is 1080, key is row0015/info:/1731746228027/Put/seqid=0 2024-11-16T08:37:08,265 WARN [Thread-1026 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1080 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:08,265 WARN [Thread-1026 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741895_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK], DatanodeInfoWithStorage[127.0.0.1:43207,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]) is bad. 
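RecoverLeaseFSUtils above asks the NameNode to recover the lease on the half-open WAL; attempt=0 comes back false while "Lease recovery is in progress", and the retry (attempt=1) lands about four seconds later near the end of this log. A stripped-down sketch of that retry loop around DistributedFileSystem.recoverLease; the backoff and deadline values are assumptions, not the ones RecoverLeaseFSUtils uses.

    // Sketch: keep nudging the NameNode until lease recovery on a WAL file completes.
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      static boolean recoverLease(DistributedFileSystem dfs, Path wal) throws Exception {
        long deadline = System.currentTimeMillis() + 15 * 60 * 1000L;   // assumed 15 min cap
        for (int attempt = 0; System.currentTimeMillis() < deadline; attempt++) {
          // Returns true once the file is closed and its last block is finalized.
          if (dfs.recoverLease(wal)) {
            return true;
          }
          // "Failed to recover lease, attempt=N" in the log corresponds to this branch.
          Thread.sleep(attempt == 0 ? 4_000L : 60_000L);                // assumed backoff
        }
        return false;
      }
    }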
2024-11-16T08:37:08,265 WARN [Thread-1026 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741895_1080 2024-11-16T08:37:08,273 WARN [Thread-1026 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK] 2024-11-16T08:37:08,305 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:08,305 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:08,306 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:08,308 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:08,308 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:08,308 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746228234.meta 2024-11-16T08:37:08,332 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:08,332 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43199,DS-8c065950-89b1-42e2-9894-acfcfdb20745,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:08,332 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta 2024-11-16T08:37:08,333 WARN [IPC Server handler 0 on default port 34591 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta has not been closed. 
Lease recovery is in progress. RecoveryId = 1082 for block blk_1073741834_1010 2024-11-16T08:37:08,340 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta after 8ms 2024-11-16T08:37:08,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43207 is added to blk_1073741896_1081 (size=13586) 2024-11-16T08:37:08,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741896_1081 (size=13586) 2024-11-16T08:37:08,356 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44551:44551),(127.0.0.1/127.0.0.1:38687:38687)] 2024-11-16T08:37:08,357 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta is not closed yet, will try archiving it next time 2024-11-16T08:37:08,427 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740/.tmp/info/864ce0b09f4740faa3b234ca32ece4ff is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280./info:regioninfo/1731746190083/Put/seqid=0 2024-11-16T08:37:08,429 DEBUG [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 500cafcb91030067761c75e0b496e280 2024-11-16T08:37:08,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43207 is added to blk_1073741897_1083 (size=7089) 2024-11-16T08:37:08,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741897_1083 (size=7089) 2024-11-16T08:37:08,493 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740/.tmp/info/864ce0b09f4740faa3b234ca32ece4ff 2024-11-16T08:37:08,533 INFO [regionserver/c27dd56784bd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T08:37:08,536 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740/.tmp/ns/9aff48519268409e8e12e0c03f1b1133 is 43, key is default/ns:d/1731746189373/Put/seqid=0 2024-11-16T08:37:08,541 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1084 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:08,542 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741898_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK], DatanodeInfoWithStorage[127.0.0.1:43207,DS-af74058c-b8da-46d5-95f6-9e0ec577f137,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]) is bad. 2024-11-16T08:37:08,542 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741898_1084 2024-11-16T08:37:08,545 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK] 2024-11-16T08:37:08,560 INFO [regionserver/c27dd56784bd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T08:37:08,560 INFO [regionserver/c27dd56784bd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T08:37:08,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43207 is added to blk_1073741899_1085 (size=5153) 2024-11-16T08:37:08,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741899_1085 (size=5153) 2024-11-16T08:37:08,629 DEBUG [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 500cafcb91030067761c75e0b496e280 2024-11-16T08:37:08,679 INFO [regionserver/c27dd56784bd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T08:37:08,680 INFO [regionserver/c27dd56784bd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T08:37:08,767 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/326a389d53d648999fb63c2d791d305c 2024-11-16T08:37:08,797 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/.tmp/info/326a389d53d648999fb63c2d791d305c as hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/326a389d53d648999fb63c2d791d305c 2024-11-16T08:37:08,822 INFO 
[RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/326a389d53d648999fb63c2d791d305c, entries=8, sequenceid=77, filesize=13.3 K 2024-11-16T08:37:08,825 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 500cafcb91030067761c75e0b496e280 in 597ms, sequenceid=77, compaction requested=true 2024-11-16T08:37:08,829 DEBUG [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 500cafcb91030067761c75e0b496e280 2024-11-16T08:37:08,829 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/fb5b619a53654d96b6f3e776a9aa6409, hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/7e3580f6efc14ea492dba6e354f4f696, hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/002a252b1ce7451aaf1bd36750b57c00, hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/d3d5c361ee504d428c42028ad7176aea, hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/164101e4b8df4fdaba8f8c396e9b3876, hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/c38d3c5ad4504b9bbad7cf563bc1dbe5] to archive 2024-11-16T08:37:08,832 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-16T08:37:08,839 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/fb5b619a53654d96b6f3e776a9aa6409 to hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/fb5b619a53654d96b6f3e776a9aa6409 2024-11-16T08:37:08,862 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/7e3580f6efc14ea492dba6e354f4f696 to hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/7e3580f6efc14ea492dba6e354f4f696 2024-11-16T08:37:08,878 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/002a252b1ce7451aaf1bd36750b57c00 to hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/002a252b1ce7451aaf1bd36750b57c00 2024-11-16T08:37:08,898 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/d3d5c361ee504d428c42028ad7176aea to hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/d3d5c361ee504d428c42028ad7176aea 2024-11-16T08:37:08,908 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/164101e4b8df4fdaba8f8c396e9b3876 to hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/164101e4b8df4fdaba8f8c396e9b3876 2024-11-16T08:37:08,921 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/c38d3c5ad4504b9bbad7cf563bc1dbe5 
to hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/info/c38d3c5ad4504b9bbad7cf563bc1dbe5 2024-11-16T08:37:08,922 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c27dd56784bd:44433 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] 
at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-16T08:37:08,922 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [fb5b619a53654d96b6f3e776a9aa6409=10347, 7e3580f6efc14ea492dba6e354f4f696=12506, 002a252b1ce7451aaf1bd36750b57c00=17994, d3d5c361ee504d428c42028ad7176aea=6027, 164101e4b8df4fdaba8f8c396e9b3876=6027, c38d3c5ad4504b9bbad7cf563bc1dbe5=6027] 2024-11-16T08:37:08,979 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/default/TestLogRolling-testLogRollOnDatanodeDeath/500cafcb91030067761c75e0b496e280/recovered.edits/80.seqid, newMaxSeqId=80, maxSeqId=1 2024-11-16T08:37:08,981 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280. 2024-11-16T08:37:08,982 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740/.tmp/ns/9aff48519268409e8e12e0c03f1b1133 2024-11-16T08:37:08,984 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 500cafcb91030067761c75e0b496e280: Waiting for close lock at 1731746228227Running coprocessor pre-close hooks at 1731746228227Disabling compacts and flushes for region at 1731746228227Disabling writes for close at 1731746228227Obtaining lock to block concurrent updates at 1731746228228 (+1 ms)Preparing flush snapshotting stores in 500cafcb91030067761c75e0b496e280 at 1731746228228Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280., syncing WAL and waiting on mvcc, flushsize=dataSize=8608, getHeapSize=9456, getOffHeapSize=0, getCellsCount=8 at 1731746228232 (+4 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280. 
at 1731746228236 (+4 ms)Flushing 500cafcb91030067761c75e0b496e280/info: creating writer at 1731746228236Flushing 500cafcb91030067761c75e0b496e280/info: appending metadata at 1731746228253 (+17 ms)Flushing 500cafcb91030067761c75e0b496e280/info: closing flushed file at 1731746228254 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12ee629c: reopening flushed file at 1731746228793 (+539 ms)Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 500cafcb91030067761c75e0b496e280 in 597ms, sequenceid=77, compaction requested=true at 1731746228825 (+32 ms)Writing region close event to WAL at 1731746228932 (+107 ms)Running coprocessor post-close hooks at 1731746228981 (+49 ms)Closed at 1731746228981 2024-11-16T08:37:08,986 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731746189704.500cafcb91030067761c75e0b496e280. 2024-11-16T08:37:09,029 DEBUG [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-16T08:37:09,069 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740/.tmp/table/0fa0530513154300b38d404bc3e59a74 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731746190095/Put/seqid=0 2024-11-16T08:37:09,079 WARN [Thread-1050 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1086 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:09,081 WARN [Thread-1050 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1154287115-172.17.0.3-1731746185679:blk_1073741900_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK], DatanodeInfoWithStorage[127.0.0.1:33283,DS-91bc1c98-e41c-44bb-9d34-3752665d2cc1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK]) is bad. 
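The HFileArchiver entries further above move each compacted store file out of the table's data directory into the matching location under archive/, keeping the table/region/family layout intact so the file stays addressable by the same relative path. A small sketch of that path mirroring plus the rename, with assumed root directories; the real archiver also handles collisions, retries, and a copy-then-delete fallback.

    // Sketch: mirror a store file's path from the data root into the archive root, then move it.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveCompactedFileSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path dataRoot    = new Path("/hbase/data");          // hypothetical roots
        Path archiveRoot = new Path("/hbase/archive/data");

        // e.g. namespace/table/<region>/<family>/<hfile>, relative to the data root
        String relative = "default/TestTable/1234abcd/info/deadbeef";
        Path src = new Path(dataRoot, relative);
        Path dst = new Path(archiveRoot, relative);          // same layout under archive/

        fs.mkdirs(dst.getParent());                          // ensure .../info exists under archive
        if (!fs.rename(src, dst)) {
          throw new java.io.IOException("could not archive " + src + " to " + dst);
        }
      }
    }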
2024-11-16T08:37:09,081 WARN [Thread-1050 {}] hdfs.DataStreamer(1850): Abandoning BP-1154287115-172.17.0.3-1731746185679:blk_1073741900_1086 2024-11-16T08:37:09,085 WARN [Thread-1050 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32829,DS-b0437106-e5b4-454c-8d94-ac6c582bfc5a,DISK] 2024-11-16T08:37:09,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741901_1087 (size=5424) 2024-11-16T08:37:09,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43207 is added to blk_1073741901_1087 (size=5424) 2024-11-16T08:37:09,136 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740/.tmp/table/0fa0530513154300b38d404bc3e59a74 2024-11-16T08:37:09,146 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740/.tmp/info/864ce0b09f4740faa3b234ca32ece4ff as hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740/info/864ce0b09f4740faa3b234ca32ece4ff 2024-11-16T08:37:09,154 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740/info/864ce0b09f4740faa3b234ca32ece4ff, entries=10, sequenceid=11, filesize=6.9 K 2024-11-16T08:37:09,155 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740/.tmp/ns/9aff48519268409e8e12e0c03f1b1133 as hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740/ns/9aff48519268409e8e12e0c03f1b1133 2024-11-16T08:37:09,162 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740/ns/9aff48519268409e8e12e0c03f1b1133, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T08:37:09,163 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740/.tmp/table/0fa0530513154300b38d404bc3e59a74 as hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740/table/0fa0530513154300b38d404bc3e59a74 2024-11-16T08:37:09,170 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740/table/0fa0530513154300b38d404bc3e59a74, entries=2, sequenceid=11, filesize=5.3 K 2024-11-16T08:37:09,172 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 939ms, sequenceid=11, compaction requested=false 2024-11-16T08:37:09,182 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T08:37:09,184 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T08:37:09,184 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T08:37:09,184 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731746228232Running coprocessor pre-close hooks at 1731746228232Disabling compacts and flushes for region at 1731746228232Disabling writes for close at 1731746228232Obtaining lock to block concurrent updates at 1731746228233 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731746228233Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731746228233Flushing stores of hbase:meta,,1.1588230740 at 1731746228376 (+143 ms)Flushing 1588230740/info: creating writer at 1731746228376Flushing 1588230740/info: appending metadata at 1731746228426 (+50 ms)Flushing 1588230740/info: closing flushed file at 1731746228426Flushing 1588230740/ns: creating writer at 1731746228502 (+76 ms)Flushing 1588230740/ns: appending metadata at 1731746228536 (+34 ms)Flushing 1588230740/ns: closing flushed file at 1731746228536Flushing 1588230740/table: creating writer at 1731746229017 (+481 ms)Flushing 1588230740/table: appending metadata at 1731746229068 (+51 ms)Flushing 1588230740/table: closing flushed file at 1731746229068Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5dc34f10: reopening flushed file at 1731746229145 (+77 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@747e21be: reopening flushed file at 1731746229154 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@267f77f2: reopening flushed file at 1731746229162 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 939ms, sequenceid=11, compaction requested=false at 1731746229172 (+10 ms)Writing region close event to WAL at 1731746229178 (+6 ms)Running coprocessor post-close hooks at 1731746229184 (+6 ms)Closed at 1731746229184 2024-11-16T08:37:09,184 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T08:37:09,230 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer(976): stopping server c27dd56784bd,34739,1731746188041; all regions closed. 
2024-11-16T08:37:09,231 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:09,232 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:09,232 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:09,233 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:09,236 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:09,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741894_1078 (size=825) 2024-11-16T08:37:09,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43207 is added to blk_1073741894_1078 (size=825) 2024-11-16T08:37:09,419 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-16T08:37:09,420 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T08:37:09,420 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T08:37:09,435 INFO [master/c27dd56784bd:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T08:37:09,435 INFO [master/c27dd56784bd:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-16T08:37:09,639 INFO [regionserver/c27dd56784bd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T08:37:09,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741835_1011 (size=393) 2024-11-16T08:37:09,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741831_1007 (size=1321) 2024-11-16T08:37:10,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741829_1005 (size=34) 2024-11-16T08:37:10,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741827_1003 (size=196) 2024-11-16T08:37:11,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43207 is added to blk_1073741877_1060 (size=12911) 2024-11-16T08:37:11,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741832_1008 (size=32) 2024-11-16T08:37:11,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741828_1004 (size=1189) 2024-11-16T08:37:12,253 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 after 4007ms 2024-11-16T08:37:12,341 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on 
file=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta after 4009ms 2024-11-16T08:37:12,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741826_1002 (size=42) 2024-11-16T08:37:13,245 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-16T08:37:13,259 DEBUG [RS:1;c27dd56784bd:44603 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/oldWALs 2024-11-16T08:37:13,259 INFO [RS:1;c27dd56784bd:44603 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c27dd56784bd%2C44603%2C1731746189510:(num 1731746189804) 2024-11-16T08:37:13,259 DEBUG [RS:1;c27dd56784bd:44603 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:37:13,259 INFO [RS:1;c27dd56784bd:44603 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T08:37:13,260 INFO [RS:1;c27dd56784bd:44603 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T08:37:13,260 INFO [RS:1;c27dd56784bd:44603 {}] hbase.ChoreService(370): Chore service for: regionserver/c27dd56784bd:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-16T08:37:13,261 INFO [RS:1;c27dd56784bd:44603 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T08:37:13,261 INFO [RS:1;c27dd56784bd:44603 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T08:37:13,261 INFO [RS:1;c27dd56784bd:44603 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T08:37:13,261 INFO [RS:1;c27dd56784bd:44603 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T08:37:13,261 INFO [RS:1;c27dd56784bd:44603 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44603 2024-11-16T08:37:13,261 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T08:37:13,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:13,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44603-0x10142c9c5fb0002, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c27dd56784bd,44603,1731746189510 2024-11-16T08:37:13,305 INFO [RS:1;c27dd56784bd:44603 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T08:37:13,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T08:37:13,369 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c27dd56784bd,44603,1731746189510] 2024-11-16T08:37:13,448 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c27dd56784bd,44603,1731746189510 already deleted, retry=false 2024-11-16T08:37:13,448 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c27dd56784bd,44603,1731746189510 expired; onlineServers=1 2024-11-16T08:37:13,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44603-0x10142c9c5fb0002, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:37:13,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44603-0x10142c9c5fb0002, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:37:13,472 INFO [RS:1;c27dd56784bd:44603 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T08:37:13,472 INFO [RS:1;c27dd56784bd:44603 {}] regionserver.HRegionServer(1031): Exiting; stopping=c27dd56784bd,44603,1731746189510; zookeeper connection closed. 2024-11-16T08:37:13,476 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6eb33582 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6eb33582 2024-11-16T08:37:13,985 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:14,040 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:14,040 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:14,041 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:14,041 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:14,041 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:14,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:14,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:14,243 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-16T08:37:14,254 DEBUG [RS:0;c27dd56784bd:34739 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/oldWALs 2024-11-16T08:37:14,254 INFO [RS:0;c27dd56784bd:34739 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c27dd56784bd%2C34739%2C1731746188041.meta:.meta(num 1731746228234) 2024-11-16T08:37:14,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:14,272 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:14,272 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:14,273 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:14,273 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:14,273 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:14,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741892_1076 (size=13514) 2024-11-16T08:37:14,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43207 is added to blk_1073741892_1076 (size=13514) 2024-11-16T08:37:14,290 DEBUG [RS:0;c27dd56784bd:34739 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/oldWALs 2024-11-16T08:37:14,290 INFO [RS:0;c27dd56784bd:34739 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c27dd56784bd%2C34739%2C1731746188041:(num 1731746227871) 2024-11-16T08:37:14,290 DEBUG [RS:0;c27dd56784bd:34739 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:37:14,290 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T08:37:14,290 INFO [RS:0;c27dd56784bd:34739 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T08:37:14,290 INFO [RS:0;c27dd56784bd:34739 {}] hbase.ChoreService(370): Chore service for: regionserver/c27dd56784bd:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T08:37:14,290 INFO [RS:0;c27dd56784bd:34739 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T08:37:14,291 INFO [RS:0;c27dd56784bd:34739 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34739 2024-11-16T08:37:14,291 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T08:37:14,329 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c27dd56784bd,34739,1731746188041 2024-11-16T08:37:14,329 INFO [RS:0;c27dd56784bd:34739 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T08:37:14,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T08:37:14,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:14,348 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c27dd56784bd,34739,1731746188041] 2024-11-16T08:37:14,422 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c27dd56784bd,34739,1731746188041 already deleted, retry=false 2024-11-16T08:37:14,422 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c27dd56784bd,34739,1731746188041 expired; onlineServers=0 2024-11-16T08:37:14,422 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c27dd56784bd,44433,1731746187873' ***** 2024-11-16T08:37:14,422 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T08:37:14,422 INFO [M:0;c27dd56784bd:44433 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T08:37:14,423 INFO [M:0;c27dd56784bd:44433 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T08:37:14,423 DEBUG [M:0;c27dd56784bd:44433 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T08:37:14,423 DEBUG [M:0;c27dd56784bd:44433 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T08:37:14,423 DEBUG [master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746188410 {}] cleaner.HFileCleaner(306): Exit Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746188410,5,FailOnTimeoutGroup] 2024-11-16T08:37:14,423 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-16T08:37:14,423 INFO [M:0;c27dd56784bd:44433 {}] hbase.ChoreService(370): Chore service for: master/c27dd56784bd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T08:37:14,423 INFO [M:0;c27dd56784bd:44433 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T08:37:14,423 DEBUG [M:0;c27dd56784bd:44433 {}] master.HMaster(1795): Stopping service threads 2024-11-16T08:37:14,423 INFO [M:0;c27dd56784bd:44433 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T08:37:14,423 INFO [M:0;c27dd56784bd:44433 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T08:37:14,423 INFO [M:0;c27dd56784bd:44433 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T08:37:14,424 DEBUG [master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746188410 {}] cleaner.HFileCleaner(306): Exit Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746188410,5,FailOnTimeoutGroup] 2024-11-16T08:37:14,424 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-16T08:37:14,450 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:37:14,450 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34739-0x10142c9c5fb0001, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:37:14,450 INFO [RS:0;c27dd56784bd:34739 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T08:37:14,450 INFO [RS:0;c27dd56784bd:34739 {}] regionserver.HRegionServer(1031): Exiting; stopping=c27dd56784bd,34739,1731746188041; zookeeper connection closed. 2024-11-16T08:37:14,451 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@36efa43d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@36efa43d 2024-11-16T08:37:14,452 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-16T08:37:14,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T08:37:14,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:14,559 DEBUG [M:0;c27dd56784bd:44433 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-16T08:37:14,559 DEBUG [M:0;c27dd56784bd:44433 {}] master.ActiveMasterManager(353): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-16T08:37:14,560 INFO [M:0;c27dd56784bd:44433 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/.lastflushedseqids 2024-11-16T08:37:14,570 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T08:37:14,603 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:14,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741902_1088 (size=130) 2024-11-16T08:37:14,605 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:14,606 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:14,606 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:14,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:14,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43207 is added to blk_1073741902_1088 (size=130) 2024-11-16T08:37:14,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:14,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:14,625 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:14,750 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7663a78e {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1154287115-172.17.0.3-1731746185679:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:43199,null,null]) java.net.ConnectException: Call From c27dd56784bd/172.17.0.3 to localhost:37695 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-16T08:37:15,005 INFO [M:0;c27dd56784bd:44433 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T08:37:15,008 INFO [M:0;c27dd56784bd:44433 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T08:37:15,008 DEBUG [M:0;c27dd56784bd:44433 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T08:37:15,008 INFO [M:0;c27dd56784bd:44433 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:37:15,008 DEBUG [M:0;c27dd56784bd:44433 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:37:15,009 DEBUG [M:0;c27dd56784bd:44433 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T08:37:15,009 DEBUG [M:0;c27dd56784bd:44433 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T08:37:15,009 INFO [M:0;c27dd56784bd:44433 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.24 KB heapSize=29.47 KB 2024-11-16T08:37:15,039 DEBUG [M:0;c27dd56784bd:44433 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc3621f2ea114853b3636c24e8deec10 is 82, key is hbase:meta,,1/info:regioninfo/1731746189272/Put/seqid=0 2024-11-16T08:37:15,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741903_1089 (size=5672) 2024-11-16T08:37:15,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43207 is added to blk_1073741903_1089 (size=5672) 2024-11-16T08:37:15,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:15,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:15,456 INFO [M:0;c27dd56784bd:44433 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc3621f2ea114853b3636c24e8deec10 2024-11-16T08:37:15,483 DEBUG [M:0;c27dd56784bd:44433 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2ab2486d7b8f4faaa21ec8bae096ccd0 is 773, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731746190101/Put/seqid=0 2024-11-16T08:37:15,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741904_1090 (size=6254) 2024-11-16T08:37:15,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43207 is added to blk_1073741904_1090 (size=6254) 2024-11-16T08:37:15,504 INFO [M:0;c27dd56784bd:44433 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2ab2486d7b8f4faaa21ec8bae096ccd0 2024-11-16T08:37:15,520 INFO [M:0;c27dd56784bd:44433 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2ab2486d7b8f4faaa21ec8bae096ccd0 2024-11-16T08:37:15,544 DEBUG [M:0;c27dd56784bd:44433 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e7d9085d6e544a27a2e5ea0259004921 is 69, key is c27dd56784bd,34739,1731746188041/rs:state/1731746188508/Put/seqid=0 2024-11-16T08:37:15,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741905_1091 (size=5224) 2024-11-16T08:37:15,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43207 is added to blk_1073741905_1091 (size=5224) 2024-11-16T08:37:15,574 INFO [M:0;c27dd56784bd:44433 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e7d9085d6e544a27a2e5ea0259004921 2024-11-16T08:37:15,608 DEBUG [M:0;c27dd56784bd:44433 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9b007d2dd6874acaa079ab4480186113 is 52, key is load_balancer_on/state:d/1731746189491/Put/seqid=0 2024-11-16T08:37:15,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43207 is added to blk_1073741906_1092 (size=5056) 2024-11-16T08:37:15,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741906_1092 (size=5056) 2024-11-16T08:37:16,029 INFO [M:0;c27dd56784bd:44433 {}] regionserver.DefaultStoreFlusher(81): Flushed 
memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9b007d2dd6874acaa079ab4480186113 2024-11-16T08:37:16,046 DEBUG [M:0;c27dd56784bd:44433 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc3621f2ea114853b3636c24e8deec10 as hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cc3621f2ea114853b3636c24e8deec10 2024-11-16T08:37:16,055 INFO [M:0;c27dd56784bd:44433 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cc3621f2ea114853b3636c24e8deec10, entries=8, sequenceid=60, filesize=5.5 K 2024-11-16T08:37:16,057 DEBUG [M:0;c27dd56784bd:44433 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2ab2486d7b8f4faaa21ec8bae096ccd0 as hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2ab2486d7b8f4faaa21ec8bae096ccd0 2024-11-16T08:37:16,064 INFO [M:0;c27dd56784bd:44433 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2ab2486d7b8f4faaa21ec8bae096ccd0 2024-11-16T08:37:16,065 INFO [M:0;c27dd56784bd:44433 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2ab2486d7b8f4faaa21ec8bae096ccd0, entries=6, sequenceid=60, filesize=6.1 K 2024-11-16T08:37:16,067 DEBUG [M:0;c27dd56784bd:44433 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e7d9085d6e544a27a2e5ea0259004921 as hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e7d9085d6e544a27a2e5ea0259004921 2024-11-16T08:37:16,084 INFO [M:0;c27dd56784bd:44433 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e7d9085d6e544a27a2e5ea0259004921, entries=2, sequenceid=60, filesize=5.1 K 2024-11-16T08:37:16,086 DEBUG [M:0;c27dd56784bd:44433 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9b007d2dd6874acaa079ab4480186113 as hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9b007d2dd6874acaa079ab4480186113 2024-11-16T08:37:16,093 INFO [M:0;c27dd56784bd:44433 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9b007d2dd6874acaa079ab4480186113, entries=1, sequenceid=60, filesize=4.9 K 2024-11-16T08:37:16,095 INFO [M:0;c27dd56784bd:44433 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1085ms, sequenceid=60, compaction requested=false 2024-11-16T08:37:16,104 INFO [M:0;c27dd56784bd:44433 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:37:16,104 DEBUG [M:0;c27dd56784bd:44433 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731746235008Disabling compacts and flushes for region at 1731746235008Disabling writes for close at 1731746235009 (+1 ms)Obtaining lock to block concurrent updates at 1731746235009Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731746235009Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23793, getHeapSize=30112, getOffHeapSize=0, getCellsCount=71 at 1731746235009Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731746235016 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731746235017 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731746235039 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731746235039Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731746235463 (+424 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731746235482 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731746235482Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731746235520 (+38 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731746235544 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731746235544Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731746235584 (+40 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731746235607 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731746235607Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@69e447ee: reopening flushed file at 1731746236044 (+437 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1cc110e8: reopening flushed file at 1731746236055 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6e453593: reopening flushed file at 1731746236065 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@343127c8: reopening flushed file at 1731746236084 (+19 ms)Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1085ms, sequenceid=60, compaction requested=false at 1731746236095 (+11 ms)Writing region close event to WAL at 1731746236104 (+9 ms)Closed at 1731746236104 2024-11-16T08:37:16,110 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:16,112 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:16,112 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:16,112 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): 
interrupted 2024-11-16T08:37:16,112 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:16,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43207 is added to blk_1073741890_1073 (size=1045) 2024-11-16T08:37:16,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33283 is added to blk_1073741890_1073 (size=1045) 2024-11-16T08:37:16,119 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T08:37:16,119 INFO [M:0;c27dd56784bd:44433 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T08:37:16,119 INFO [M:0;c27dd56784bd:44433 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44433 2024-11-16T08:37:16,120 INFO [M:0;c27dd56784bd:44433 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T08:37:16,249 INFO [M:0;c27dd56784bd:44433 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T08:37:16,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:37:16,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44433-0x10142c9c5fb0000, quorum=127.0.0.1:54424, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:37:16,263 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@26e43435{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:37:16,264 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@34e71a9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:37:16,264 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:37:16,264 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41a1c5f7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:37:16,264 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c8ca2dc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/hadoop.log.dir/,STOPPED} 2024-11-16T08:37:16,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:16,270 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T08:37:16,270 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T08:37:16,269 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@eb9c1c8 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1154287115-172.17.0.3-1731746185679:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:43199,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:37695 , LocalHost:localPort c27dd56784bd/172.17.0.3:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-16T08:37:16,270 WARN [BP-1154287115-172.17.0.3-1731746185679 heartbeating to localhost/127.0.0.1:34591 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T08:37:16,270 WARN [BP-1154287115-172.17.0.3-1731746185679 heartbeating to localhost/127.0.0.1:34591 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1154287115-172.17.0.3-1731746185679 (Datanode Uuid 37c37cb5-bfce-4659-be37-7fdcb648c3f2) service to localhost/127.0.0.1:34591 2024-11-16T08:37:16,271 ERROR [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@eb9c1c8 {}] datanode.DataNode(1743): Cannot find BPOfferService for reporting block received for bpid=BP-1154287115-172.17.0.3-1731746185679 2024-11-16T08:37:16,274 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data4/current/BP-1154287115-172.17.0.3-1731746185679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:37:16,274 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data3/current/BP-1154287115-172.17.0.3-1731746185679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:37:16,274 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T08:37:16,277 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@eb9c1c8 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1154287115-172.17.0.3-1731746185679:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:43207,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1154287115-172.17.0.3-1731746185679 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:37:16,277 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@eb9c1c8 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1154287115-172.17.0.3-1731746185679:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:43199,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1154287115-172.17.0.3-1731746185679 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:37:16,277 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@eb9c1c8 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1154287115-172.17.0.3-1731746185679:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:43207,null,null], DatanodeInfoWithStorage[127.0.0.1:43199,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1154287115-172.17.0.3-1731746185679:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:43207,null,null], DatanodeInfoWithStorage[127.0.0.1:43199,null,null]] 2024-11-16T08:37:16,293 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2f0a2519{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:37:16,294 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@68004957{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:37:16,294 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:37:16,295 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@719bbb9b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:37:16,295 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@fb911ed{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/hadoop.log.dir/,STOPPED} 2024-11-16T08:37:16,309 WARN [BP-1154287115-172.17.0.3-1731746185679 heartbeating to localhost/127.0.0.1:34591 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T08:37:16,310 WARN [BP-1154287115-172.17.0.3-1731746185679 heartbeating to localhost/127.0.0.1:34591 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1154287115-172.17.0.3-1731746185679 (Datanode Uuid 7b100a5f-d7ff-4a39-a6c3-8e6a025bd9ae) service to localhost/127.0.0.1:34591 2024-11-16T08:37:16,311 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data6/current/BP-1154287115-172.17.0.3-1731746185679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:37:16,312 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T08:37:16,312 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
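Note on the repeated "Failed invocation ... java.lang.reflect.InvocationTargetException: null" warnings above: the stack traces show RecoverLeaseFSUtils probing whether the WAL file is already closed by invoking DistributedFileSystem.isFileClosed through reflection (Method.invoke appears directly above RecoverLeaseFSUtils.isFileClosed). Because the mini-cluster teardown has already shut the DFSClient down, the reflective wrapper itself carries no message ("null"); the actual error only appears as the wrapped cause, "java.io.IOException: Filesystem closed". The following is a minimal, self-contained sketch of such a reflective probe, written for illustration only and not the HBase implementation; the class name and the path argument are hypothetical.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {
  // Returns true if the file is known to be closed, false if the probe could not
  // be completed (no isFileClosed method on this FileSystem, or the client is gone).
  static boolean isFileClosed(FileSystem fs, Path p) {
    try {
      // DistributedFileSystem exposes isFileClosed(Path); the base FileSystem class
      // may not, which is why the lookup is done reflectively.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, p);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // no lease/close concept to probe on this filesystem
    } catch (InvocationTargetException e) {
      // The wrapper has no message of its own ("InvocationTargetException: null");
      // the interesting part is the cause, e.g. "java.io.IOException: Filesystem closed"
      // when the DFSClient was already shut down during teardown.
      System.err.println("isFileClosed probe failed: " + e.getCause());
      return false;
    }
  }

  public static void main(String[] args) throws Exception {
    // Hypothetical usage; in the log the path is a WAL file under the test-data directory.
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      System.out.println(isFileClosed(fs, new Path(args[0])));
    }
  }
}

Unwrapping getCause() is what turns the uninformative "InvocationTargetException: null" into the actionable "Filesystem closed" message seen in the log.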
2024-11-16T08:37:16,312 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T08:37:16,312 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/cluster_1caea84c-2fc8-bf30-06ee-9cc524e168fa/data/data5/current/BP-1154287115-172.17.0.3-1731746185679 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:37:16,329 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2606b08f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T08:37:16,337 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@c053989{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:37:16,337 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:37:16,337 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3150e6db{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:37:16,337 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1aa07d80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/hadoop.log.dir/,STOPPED} 2024-11-16T08:37:16,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:16,362 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T08:37:16,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T08:37:16,420 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=156 (was 81) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:37163 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34591 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34591 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$901/0x00007faa10bf52a8.run(Unknown 
Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34591 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34591 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34591 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37163 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:34591 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34591 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34591 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:34591 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:34591 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$901/0x00007faa10bf52a8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34591 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=448 (was 400) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=409 (was 242) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3521 (was 2432) - AvailableMemoryMB LEAK? 
- 2024-11-16T08:37:16,441 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=156, OpenFileDescriptor=448, MaxFileDescriptor=1048576, SystemLoadAverage=409, ProcessCount=11, AvailableMemoryMB=3521 2024-11-16T08:37:16,442 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T08:37:16,442 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/hadoop.log.dir so I do NOT create it in target/test-data/e4294179-bc89-904f-f726-7444b54eab0f 2024-11-16T08:37:16,442 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/894b2a8c-8f2a-f64e-3f15-926fcea3ed78/hadoop.tmp.dir so I do NOT create it in target/test-data/e4294179-bc89-904f-f726-7444b54eab0f 2024-11-16T08:37:16,442 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/cluster_5a80eb13-0e85-3694-9134-074b45462566, deleteOnExit=true 2024-11-16T08:37:16,442 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T08:37:16,442 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/test.cache.data in system properties and HBase conf 2024-11-16T08:37:16,442 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T08:37:16,442 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/hadoop.log.dir in system properties and HBase conf 2024-11-16T08:37:16,442 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T08:37:16,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T08:37:16,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T08:37:16,443 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T08:37:16,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T08:37:16,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T08:37:16,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T08:37:16,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T08:37:16,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T08:37:16,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T08:37:16,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T08:37:16,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T08:37:16,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T08:37:16,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/nfs.dump.dir in system properties and HBase conf 2024-11-16T08:37:16,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/java.io.tmpdir in system properties and HBase conf 2024-11-16T08:37:16,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T08:37:16,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T08:37:16,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T08:37:16,461 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T08:37:16,901 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:37:16,911 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:37:16,924 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:37:16,924 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:37:16,924 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T08:37:16,927 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:37:16,927 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@53effb5a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:37:16,928 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c2762d6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:37:17,036 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@708201bd{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/java.io.tmpdir/jetty-localhost-34729-hadoop-hdfs-3_4_1-tests_jar-_-any-10682330992960694799/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T08:37:17,037 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11255fea{HTTP/1.1, (http/1.1)}{localhost:34729} 2024-11-16T08:37:17,037 INFO [Time-limited test {}] server.Server(415): Started @155273ms 2024-11-16T08:37:17,051 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T08:37:17,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:17,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:17,845 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:37:17,853 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:37:17,866 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:37:17,866 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:37:17,867 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T08:37:17,869 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a48749e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:37:17,869 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62bbed65{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:37:18,013 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ea5a7a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/java.io.tmpdir/jetty-localhost-43345-hadoop-hdfs-3_4_1-tests_jar-_-any-317773017314561812/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:37:18,014 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@47ffea33{HTTP/1.1, (http/1.1)}{localhost:43345} 2024-11-16T08:37:18,014 INFO [Time-limited test {}] server.Server(415): Started @156250ms 2024-11-16T08:37:18,016 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:37:18,121 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:37:18,128 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:37:18,133 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:37:18,133 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:37:18,133 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T08:37:18,137 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50b10539{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:37:18,137 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60deb4a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:37:18,259 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7a1758ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/java.io.tmpdir/jetty-localhost-38387-hadoop-hdfs-3_4_1-tests_jar-_-any-2319243250978274777/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:37:18,260 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6144f613{HTTP/1.1, (http/1.1)}{localhost:38387} 2024-11-16T08:37:18,260 INFO [Time-limited test {}] server.Server(415): Started @156496ms 2024-11-16T08:37:18,262 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:37:18,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:18,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:19,272 WARN [Thread-1188 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/cluster_5a80eb13-0e85-3694-9134-074b45462566/data/data1/current/BP-921546855-172.17.0.3-1731746236475/current, will proceed with Du for space computation calculation, 2024-11-16T08:37:19,272 WARN [Thread-1189 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/cluster_5a80eb13-0e85-3694-9134-074b45462566/data/data2/current/BP-921546855-172.17.0.3-1731746236475/current, will proceed with Du for space computation calculation, 2024-11-16T08:37:19,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:19,297 WARN [Thread-1152 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T08:37:19,300 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x18afacb4587e1b27 with lease ID 0x55f22628a93a7f50: Processing first storage report for DS-9a89e58d-9857-4b31-be9b-41364b271d9b from datanode DatanodeRegistration(127.0.0.1:36335, datanodeUuid=882be36c-b551-492c-9d5c-f23c55439f3e, infoPort=42253, infoSecurePort=0, ipcPort=44525, storageInfo=lv=-57;cid=testClusterID;nsid=203180900;c=1731746236475) 2024-11-16T08:37:19,300 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x18afacb4587e1b27 with lease ID 0x55f22628a93a7f50: from storage DS-9a89e58d-9857-4b31-be9b-41364b271d9b node DatanodeRegistration(127.0.0.1:36335, datanodeUuid=882be36c-b551-492c-9d5c-f23c55439f3e, infoPort=42253, infoSecurePort=0, ipcPort=44525, storageInfo=lv=-57;cid=testClusterID;nsid=203180900;c=1731746236475), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:37:19,300 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x18afacb4587e1b27 with lease ID 0x55f22628a93a7f50: Processing first storage report for DS-7a077a4e-530c-4bb4-a3e1-aaa2a4f98717 from datanode DatanodeRegistration(127.0.0.1:36335, datanodeUuid=882be36c-b551-492c-9d5c-f23c55439f3e, infoPort=42253, infoSecurePort=0, ipcPort=44525, storageInfo=lv=-57;cid=testClusterID;nsid=203180900;c=1731746236475) 2024-11-16T08:37:19,300 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x18afacb4587e1b27 with lease ID 0x55f22628a93a7f50: from storage DS-7a077a4e-530c-4bb4-a3e1-aaa2a4f98717 node DatanodeRegistration(127.0.0.1:36335, datanodeUuid=882be36c-b551-492c-9d5c-f23c55439f3e, infoPort=42253, infoSecurePort=0, ipcPort=44525, storageInfo=lv=-57;cid=testClusterID;nsid=203180900;c=1731746236475), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T08:37:19,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:19,419 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T08:37:19,571 WARN [Thread-1199 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/cluster_5a80eb13-0e85-3694-9134-074b45462566/data/data3/current/BP-921546855-172.17.0.3-1731746236475/current, will proceed with Du for space computation calculation, 2024-11-16T08:37:19,576 WARN [Thread-1200 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/cluster_5a80eb13-0e85-3694-9134-074b45462566/data/data4/current/BP-921546855-172.17.0.3-1731746236475/current, will proceed with Du for space computation calculation, 2024-11-16T08:37:19,606 WARN [Thread-1175 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T08:37:19,608 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc77dc1dd97d22508 with lease ID 0x55f22628a93a7f51: Processing first storage report for DS-e17595d2-9167-4def-b028-0973e110976b from datanode DatanodeRegistration(127.0.0.1:39999, datanodeUuid=6ca2f07c-8fe0-461e-81df-db1309519ed2, infoPort=43313, infoSecurePort=0, ipcPort=33369, storageInfo=lv=-57;cid=testClusterID;nsid=203180900;c=1731746236475) 2024-11-16T08:37:19,608 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc77dc1dd97d22508 with lease ID 0x55f22628a93a7f51: from storage DS-e17595d2-9167-4def-b028-0973e110976b node DatanodeRegistration(127.0.0.1:39999, datanodeUuid=6ca2f07c-8fe0-461e-81df-db1309519ed2, infoPort=43313, infoSecurePort=0, ipcPort=33369, storageInfo=lv=-57;cid=testClusterID;nsid=203180900;c=1731746236475), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:37:19,608 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc77dc1dd97d22508 with lease ID 0x55f22628a93a7f51: Processing first storage report for DS-e9c62e18-b488-4404-a6cb-911672c6b333 from datanode DatanodeRegistration(127.0.0.1:39999, datanodeUuid=6ca2f07c-8fe0-461e-81df-db1309519ed2, infoPort=43313, infoSecurePort=0, ipcPort=33369, storageInfo=lv=-57;cid=testClusterID;nsid=203180900;c=1731746236475) 2024-11-16T08:37:19,608 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc77dc1dd97d22508 with lease ID 0x55f22628a93a7f51: from storage DS-e9c62e18-b488-4404-a6cb-911672c6b333 node DatanodeRegistration(127.0.0.1:39999, datanodeUuid=6ca2f07c-8fe0-461e-81df-db1309519ed2, infoPort=43313, infoSecurePort=0, ipcPort=33369, storageInfo=lv=-57;cid=testClusterID;nsid=203180900;c=1731746236475), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:37:19,637 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f 2024-11-16T08:37:19,642 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/cluster_5a80eb13-0e85-3694-9134-074b45462566/zookeeper_0, clientPort=61509, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/cluster_5a80eb13-0e85-3694-9134-074b45462566/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/cluster_5a80eb13-0e85-3694-9134-074b45462566/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T08:37:19,643 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61509 2024-11-16T08:37:19,643 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:37:19,645 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:37:19,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36335 is added to blk_1073741825_1001 (size=7) 2024-11-16T08:37:19,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39999 is added to blk_1073741825_1001 (size=7) 2024-11-16T08:37:19,661 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca with version=8 2024-11-16T08:37:19,661 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/hbase-staging 2024-11-16T08:37:19,663 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c27dd56784bd:0 server-side Connection retries=45 2024-11-16T08:37:19,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:37:19,664 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T08:37:19,664 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T08:37:19,664 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:37:19,664 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T08:37:19,664 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T08:37:19,664 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T08:37:19,665 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41917 2024-11-16T08:37:19,666 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41917 connecting to ZooKeeper ensemble=127.0.0.1:61509 2024-11-16T08:37:19,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:419170x0, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T08:37:19,726 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41917-0x10142ca90440000 connected 2024-11-16T08:37:19,846 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:37:19,847 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:37:19,849 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:37:19,850 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca, hbase.cluster.distributed=false 2024-11-16T08:37:19,851 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T08:37:19,852 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41917 2024-11-16T08:37:19,852 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41917 2024-11-16T08:37:19,853 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41917 2024-11-16T08:37:19,853 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41917 2024-11-16T08:37:19,853 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41917 2024-11-16T08:37:19,873 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c27dd56784bd:0 server-side Connection retries=45 2024-11-16T08:37:19,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:37:19,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T08:37:19,873 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T08:37:19,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:37:19,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T08:37:19,874 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T08:37:19,874 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T08:37:19,874 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:35969 2024-11-16T08:37:19,877 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35969 connecting to ZooKeeper ensemble=127.0.0.1:61509 2024-11-16T08:37:19,878 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:37:19,881 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:37:19,894 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:359690x0, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T08:37:19,895 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35969-0x10142ca90440001 connected 2024-11-16T08:37:19,895 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:37:19,895 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T08:37:19,901 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T08:37:19,902 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T08:37:19,903 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T08:37:19,905 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35969 2024-11-16T08:37:19,907 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35969 2024-11-16T08:37:19,907 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35969 2024-11-16T08:37:19,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35969 2024-11-16T08:37:19,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35969 2024-11-16T08:37:19,921 DEBUG [M:0;c27dd56784bd:41917 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c27dd56784bd:41917 2024-11-16T08:37:19,921 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c27dd56784bd,41917,1731746239663 2024-11-16T08:37:19,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:37:19,930 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:37:19,931 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c27dd56784bd,41917,1731746239663 2024-11-16T08:37:19,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:19,940 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T08:37:19,940 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:19,941 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T08:37:19,941 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c27dd56784bd,41917,1731746239663 from backup master directory 2024-11-16T08:37:19,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c27dd56784bd,41917,1731746239663 2024-11-16T08:37:19,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:37:19,951 WARN [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-16T08:37:19,951 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c27dd56784bd,41917,1731746239663 2024-11-16T08:37:19,951 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:37:19,956 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/hbase.id] with ID: 367a47ba-4ad0-4c9a-89a6-cc213898aa7f 2024-11-16T08:37:19,956 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/.tmp/hbase.id 2024-11-16T08:37:19,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39999 is added to blk_1073741826_1002 (size=42) 2024-11-16T08:37:19,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36335 is added to blk_1073741826_1002 (size=42) 2024-11-16T08:37:19,965 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/.tmp/hbase.id]:[hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/hbase.id] 2024-11-16T08:37:19,981 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:37:19,981 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T08:37:19,983 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-16T08:37:19,993 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:19,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:20,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36335 is added to blk_1073741827_1003 (size=196) 2024-11-16T08:37:20,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39999 is added to blk_1073741827_1003 (size=196) 2024-11-16T08:37:20,004 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T08:37:20,005 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T08:37:20,007 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:37:20,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39999 is added to blk_1073741828_1004 (size=1189) 2024-11-16T08:37:20,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36335 is added to blk_1073741828_1004 (size=1189) 2024-11-16T08:37:20,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:20,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:20,436 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store 2024-11-16T08:37:20,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39999 is added to blk_1073741829_1005 (size=34) 2024-11-16T08:37:20,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36335 is added to blk_1073741829_1005 (size=34) 2024-11-16T08:37:20,450 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:37:20,450 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T08:37:20,450 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:37:20,450 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
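[Editor's note] The two RecoverLeaseFSUtils(258) warnings above show isFileClosed being invoked through reflection (GeneratedMethodAccessor/Method.invoke in the trace) and failing with an InvocationTargetException whose cause is IOException("Filesystem closed") because the DFSClient had already been shut down. A sketch of such a reflective probe is below; the class and method names here are illustrative, not the actual RecoverLeaseFSUtils code.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of a reflective isFileClosed() probe. Reflection lets the caller compile even when
// the underlying FileSystem does not expose the method; the wrapped cause is what surfaces
// as "Filesystem closed" in the stack traces above.
public final class IsFileClosedProbe {
    public static boolean isFileClosed(FileSystem fs, Path p) {
        try {
            Method m = fs.getClass().getMethod("isFileClosed", Path.class);
            return (Boolean) m.invoke(fs, p);
        } catch (NoSuchMethodException e) {
            return false;                     // method not available on this FileSystem implementation
        } catch (IllegalAccessException | InvocationTargetException e) {
            // e.getCause() carries the real failure, e.g. IOException("Filesystem closed")
            return false;
        }
    }
}
```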
2024-11-16T08:37:20,450 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T08:37:20,450 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:37:20,450 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:37:20,450 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731746240450Disabling compacts and flushes for region at 1731746240450Disabling writes for close at 1731746240450Writing region close event to WAL at 1731746240450Closed at 1731746240450 2024-11-16T08:37:20,451 WARN [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/.initializing 2024-11-16T08:37:20,451 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/WALs/c27dd56784bd,41917,1731746239663 2024-11-16T08:37:20,454 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C41917%2C1731746239663, suffix=, logDir=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/WALs/c27dd56784bd,41917,1731746239663, archiveDir=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/oldWALs, maxLogs=10 2024-11-16T08:37:20,455 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C41917%2C1731746239663.1731746240455 2024-11-16T08:37:20,462 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/WALs/c27dd56784bd,41917,1731746239663/c27dd56784bd%2C41917%2C1731746239663.1731746240455 2024-11-16T08:37:20,463 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42253:42253),(127.0.0.1/127.0.0.1:43313:43313)] 2024-11-16T08:37:20,468 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T08:37:20,468 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:37:20,468 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:20,468 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:20,470 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:20,471 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T08:37:20,472 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:20,473 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:37:20,473 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:20,478 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T08:37:20,478 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:20,478 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:37:20,478 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:20,480 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T08:37:20,480 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:20,480 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:37:20,480 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:20,482 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T08:37:20,482 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:20,483 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:37:20,483 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:20,484 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 
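[Editor's note] The CompactionConfiguration(183) entries above list the knobs used when selecting store files to compact: minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2. The toy selection rule below applies a ratio test of that shape (a large old file is skipped when it exceeds ratio times the combined size of the newer files); it is a deliberate simplification for illustration, not the ExploringCompactionPolicy named in the log.

```java
import java.util.ArrayList;
import java.util.List;

// Toy ratio-based selection sketch using the parameters logged above (ratio=1.2,
// minCompactSize=128 MB, minFilesToCompact=3). Sizes are ordered oldest-first.
public class RatioSelectionSketch {
    static List<Long> select(List<Long> sizesOldestFirst, double ratio,
                             long minCompactSize, int minFilesToCompact) {
        List<Long> candidates = new ArrayList<>(sizesOldestFirst);
        // Drop old files whose size exceeds ratio * (sum of the newer files after them).
        while (!candidates.isEmpty()) {
            long oldest = candidates.get(0);
            long sumNewer = candidates.stream().skip(1).mapToLong(Long::longValue).sum();
            if (oldest <= minCompactSize || oldest <= ratio * sumNewer) {
                break;                        // oldest file satisfies the ratio rule; keep the rest too
            }
            candidates.remove(0);
        }
        return candidates.size() >= minFilesToCompact ? candidates : List.of();
    }

    public static void main(String[] args) {
        long mb = 1024L * 1024L;
        // The 900 MB file fails the ratio test and is excluded; the three small files qualify.
        System.out.println(select(List.of(900 * mb, 40 * mb, 30 * mb, 20 * mb),
                                  1.2, 128 * mb, 3));
    }
}
```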
2024-11-16T08:37:20,484 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:20,486 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:20,486 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:20,486 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T08:37:20,488 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:20,492 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T08:37:20,493 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=790557, jitterRate=0.00524553656578064}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T08:37:20,494 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731746240469Initializing all the Stores at 1731746240470 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746240470Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746240470Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746240470Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746240470Cleaning up temporary data from old regions at 1731746240486 
(+16 ms)Region opened successfully at 1731746240494 (+8 ms) 2024-11-16T08:37:20,494 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T08:37:20,498 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35ea64fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c27dd56784bd/172.17.0.3:0 2024-11-16T08:37:20,499 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T08:37:20,500 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T08:37:20,500 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T08:37:20,500 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T08:37:20,502 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-16T08:37:20,502 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T08:37:20,502 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T08:37:20,511 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
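[Editor's note] The ProcedureExecutor and RemoteProcedureDispatcher entries above describe a worker pool with a small core size, a much larger burst ceiling (max worker count 50), a bounded queue (queueMaxSize=32), and allowCoreThreadTimeOut=true. The sketch below builds a plain java.util.concurrent pool with that shape; the max(cpus/4, 16) sizing formula is copied from the log message purely as an illustration, and none of this is HBase's actual executor code.

```java
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Sketch of a "core workers plus burst" pool, assuming only the JDK.
public class CoreBurstPoolSketch {
    public static ThreadPoolExecutor newPool() {
        int cpus = Runtime.getRuntime().availableProcessors();
        int core = Math.max(cpus / 4, 16);                // sizing formula quoted in the log line
        int burst = 50;                                   // max (burst) worker count from the log
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                core, burst,
                60, TimeUnit.SECONDS,                     // idle workers retire after 60 s
                new ArrayBlockingQueue<>(32));            // burst threads start only once this queue fills
        pool.allowCoreThreadTimeOut(true);                // matches allowCoreThreadTimeOut=true above
        return pool;
    }

    public static void main(String[] args) {
        ThreadPoolExecutor pool = newPool();
        for (int i = 0; i < 5; i++) {
            int id = i;
            pool.execute(() -> System.out.println("task " + id + " on " + Thread.currentThread().getName()));
        }
        pool.shutdown();
    }
}
```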
2024-11-16T08:37:20,513 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T08:37:20,578 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T08:37:20,579 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T08:37:20,580 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T08:37:20,588 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T08:37:20,589 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T08:37:20,590 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T08:37:20,599 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T08:37:20,601 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T08:37:20,610 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T08:37:20,612 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T08:37:20,624 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T08:37:20,635 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T08:37:20,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T08:37:20,635 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:20,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-16T08:37:20,635 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c27dd56784bd,41917,1731746239663, sessionid=0x10142ca90440000, setting cluster-up flag (Was=false) 2024-11-16T08:37:20,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:20,656 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:20,687 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T08:37:20,688 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c27dd56784bd,41917,1731746239663 2024-11-16T08:37:20,708 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:20,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:20,740 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T08:37:20,741 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c27dd56784bd,41917,1731746239663 2024-11-16T08:37:20,742 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T08:37:20,744 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T08:37:20,744 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T08:37:20,744 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
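[Editor's note] The StochasticLoadBalancer(272) entry above lists the configured cost functions and reports "sum of multiplier of cost functions = 0.0" for this empty cluster. One common way to combine such functions is a multiplier-weighted average of per-function costs, sketched below; this is a simplification for illustration, not the actual StochasticLoadBalancer implementation, and the example multipliers are made up.

```java
// Weighted-cost sketch: each function yields a cost in [0, 1], scaled by its multiplier
// and normalized by the sum of multipliers.
public class WeightedCostSketch {
    interface CostFunction {
        double cost();        // normalized to [0, 1]
        double multiplier();  // weight from configuration
    }

    static double totalCost(CostFunction[] functions) {
        double weighted = 0.0;
        double sumMultipliers = 0.0;
        for (CostFunction f : functions) {
            if (f.multiplier() <= 0) {
                continue;                       // disabled functions contribute nothing
            }
            weighted += f.multiplier() * f.cost();
            sumMultipliers += f.multiplier();
        }
        // A zero multiplier sum (as logged above for an empty cluster) means nothing to weigh yet.
        return sumMultipliers == 0.0 ? 0.0 : weighted / sumMultipliers;
    }

    public static void main(String[] args) {
        CostFunction skew = new CostFunction() {
            public double cost() { return 0.4; }
            public double multiplier() { return 500; }
        };
        CostFunction locality = new CostFunction() {
            public double cost() { return 0.1; }
            public double multiplier() { return 25; }
        };
        System.out.println(totalCost(new CostFunction[] { skew, locality }));
    }
}
```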
2024-11-16T08:37:20,744 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c27dd56784bd,41917,1731746239663 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T08:37:20,746 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:37:20,746 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:37:20,746 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:37:20,746 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:37:20,746 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c27dd56784bd:0, corePoolSize=10, maxPoolSize=10 2024-11-16T08:37:20,746 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:20,746 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c27dd56784bd:0, corePoolSize=2, maxPoolSize=2 2024-11-16T08:37:20,746 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:20,747 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731746270747 2024-11-16T08:37:20,747 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T08:37:20,747 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T08:37:20,747 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T08:37:20,747 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T08:37:20,747 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T08:37:20,747 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T08:37:20,747 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:20,748 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T08:37:20,748 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T08:37:20,748 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:37:20,748 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T08:37:20,748 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T08:37:20,748 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T08:37:20,748 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T08:37:20,748 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746240748,5,FailOnTimeoutGroup] 2024-11-16T08:37:20,749 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746240749,5,FailOnTimeoutGroup] 2024-11-16T08:37:20,749 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:20,749 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T08:37:20,749 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:20,749 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
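[Editor's note] The ChoreService(168) entries above enable periodic chores such as LogsCleaner (every 600 000 ms) and ReplicationBarrierCleaner (every 43 200 000 ms). The sketch below schedules equivalent periodic tasks with a plain ScheduledExecutorService; it only illustrates the scheduling shape and is not HBase's ChoreService.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Periodic "chore" scheduling sketch, assuming only the JDK.
public class ChoreSchedulingSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
        chores.scheduleAtFixedRate(
                () -> System.out.println("LogsCleaner tick"),
                0, 600_000, TimeUnit.MILLISECONDS);          // period from the LogsCleaner chore above
        chores.scheduleAtFixedRate(
                () -> System.out.println("ReplicationBarrierCleaner tick"),
                0, 43_200_000, TimeUnit.MILLISECONDS);       // period from the ReplicationBarrierCleaner chore
        Thread.sleep(1_000);   // toy example: let the first ticks run, then stop
        chores.shutdown();
    }
}
```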
2024-11-16T08:37:20,749 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:20,749 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T08:37:20,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39999 is added to blk_1073741831_1007 (size=1321) 2024-11-16T08:37:20,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36335 is added to blk_1073741831_1007 (size=1321) 2024-11-16T08:37:20,758 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T08:37:20,758 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca 2024-11-16T08:37:20,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39999 is added to blk_1073741832_1008 (size=32) 2024-11-16T08:37:20,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36335 is added to blk_1073741832_1008 (size=32) 2024-11-16T08:37:20,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:37:20,778 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T08:37:20,780 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T08:37:20,780 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:20,781 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:37:20,781 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T08:37:20,783 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T08:37:20,783 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:20,783 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:37:20,783 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T08:37:20,785 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T08:37:20,785 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:20,785 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:37:20,786 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T08:37:20,787 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T08:37:20,787 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:20,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:37:20,788 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T08:37:20,789 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740 2024-11-16T08:37:20,789 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740 2024-11-16T08:37:20,791 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T08:37:20,791 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T08:37:20,791 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T08:37:20,793 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T08:37:20,795 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T08:37:20,795 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=709757, jitterRate=-0.09749779105186462}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T08:37:20,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731746240777Initializing all the Stores at 1731746240778 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746240778Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746240778Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746240778Instantiating store for column family {NAME => 'table', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746240778Cleaning up temporary data from old regions at 1731746240791 (+13 ms)Region opened successfully at 1731746240796 (+5 ms) 2024-11-16T08:37:20,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T08:37:20,796 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T08:37:20,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T08:37:20,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T08:37:20,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T08:37:20,797 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T08:37:20,797 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731746240796Disabling compacts and flushes for region at 1731746240796Disabling writes for close at 1731746240796Writing region close event to WAL at 1731746240797 (+1 ms)Closed at 1731746240797 2024-11-16T08:37:20,798 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:37:20,798 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T08:37:20,799 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T08:37:20,800 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T08:37:20,802 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T08:37:20,811 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.HRegionServer(746): ClusterId : 367a47ba-4ad0-4c9a-89a6-cc213898aa7f 2024-11-16T08:37:20,811 DEBUG [RS:0;c27dd56784bd:35969 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T08:37:20,821 DEBUG [RS:0;c27dd56784bd:35969 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T08:37:20,821 DEBUG [RS:0;c27dd56784bd:35969 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T08:37:20,832 DEBUG [RS:0;c27dd56784bd:35969 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T08:37:20,832 DEBUG [RS:0;c27dd56784bd:35969 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@326b4795, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c27dd56784bd/172.17.0.3:0 2024-11-16T08:37:20,845 DEBUG [RS:0;c27dd56784bd:35969 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c27dd56784bd:35969 2024-11-16T08:37:20,846 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T08:37:20,846 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T08:37:20,846 DEBUG [RS:0;c27dd56784bd:35969 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-16T08:37:20,847 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.HRegionServer(2659): reportForDuty to master=c27dd56784bd,41917,1731746239663 with port=35969, startcode=1731746239872 2024-11-16T08:37:20,847 DEBUG [RS:0;c27dd56784bd:35969 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T08:37:20,850 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33283, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T08:37:20,850 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41917 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c27dd56784bd,35969,1731746239872 2024-11-16T08:37:20,851 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41917 {}] master.ServerManager(517): Registering regionserver=c27dd56784bd,35969,1731746239872 2024-11-16T08:37:20,853 DEBUG [RS:0;c27dd56784bd:35969 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca 2024-11-16T08:37:20,853 DEBUG [RS:0;c27dd56784bd:35969 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40709 2024-11-16T08:37:20,853 DEBUG [RS:0;c27dd56784bd:35969 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T08:37:20,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T08:37:20,863 DEBUG [RS:0;c27dd56784bd:35969 {}] zookeeper.ZKUtil(111): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c27dd56784bd,35969,1731746239872 2024-11-16T08:37:20,863 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c27dd56784bd,35969,1731746239872] 2024-11-16T08:37:20,863 WARN [RS:0;c27dd56784bd:35969 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-16T08:37:20,863 INFO [RS:0;c27dd56784bd:35969 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:37:20,864 DEBUG [RS:0;c27dd56784bd:35969 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872 2024-11-16T08:37:20,868 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T08:37:20,872 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T08:37:20,876 INFO [RS:0;c27dd56784bd:35969 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T08:37:20,876 INFO [RS:0;c27dd56784bd:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:20,877 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T08:37:20,878 INFO [RS:0;c27dd56784bd:35969 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T08:37:20,879 INFO [RS:0;c27dd56784bd:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:20,879 DEBUG [RS:0;c27dd56784bd:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:20,879 DEBUG [RS:0;c27dd56784bd:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:20,879 DEBUG [RS:0;c27dd56784bd:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:20,879 DEBUG [RS:0;c27dd56784bd:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:20,879 DEBUG [RS:0;c27dd56784bd:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:20,879 DEBUG [RS:0;c27dd56784bd:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c27dd56784bd:0, corePoolSize=2, maxPoolSize=2 2024-11-16T08:37:20,879 DEBUG [RS:0;c27dd56784bd:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:20,879 DEBUG [RS:0;c27dd56784bd:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:20,879 DEBUG [RS:0;c27dd56784bd:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:20,879 DEBUG [RS:0;c27dd56784bd:35969 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:20,880 DEBUG [RS:0;c27dd56784bd:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:20,880 DEBUG [RS:0;c27dd56784bd:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:20,880 DEBUG [RS:0;c27dd56784bd:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c27dd56784bd:0, corePoolSize=3, maxPoolSize=3 2024-11-16T08:37:20,880 DEBUG [RS:0;c27dd56784bd:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0, corePoolSize=3, maxPoolSize=3 2024-11-16T08:37:20,882 INFO [RS:0;c27dd56784bd:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:20,882 INFO [RS:0;c27dd56784bd:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:20,882 INFO [RS:0;c27dd56784bd:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:20,882 INFO [RS:0;c27dd56784bd:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:20,883 INFO [RS:0;c27dd56784bd:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:20,883 INFO [RS:0;c27dd56784bd:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,35969,1731746239872-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T08:37:20,900 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T08:37:20,900 INFO [RS:0;c27dd56784bd:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,35969,1731746239872-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:20,900 INFO [RS:0;c27dd56784bd:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:20,900 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.Replication(171): c27dd56784bd,35969,1731746239872 started 2024-11-16T08:37:20,914 INFO [RS:0;c27dd56784bd:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
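The chores enabled in the entries above (CompactionChecker, MemstoreFlusherChore, ExecutorStatusChore, ReplicationSourceStatistics, and so on) all follow HBase's ScheduledChore pattern: a named task with a fixed period that a ChoreService runs until its Stoppable signals shutdown. A minimal sketch of that pattern, under the assumption of a hypothetical chore name and body (ScheduledChore, ChoreService and Stoppable are the real HBase classes; everything else here is illustrative, not code from this test):

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

// Hypothetical chore, mirroring the period-based entries logged above.
class ExampleStatusChore extends ScheduledChore {
  ExampleStatusChore(Stoppable stopper) {
    super("ExampleStatusChore", stopper, 60000); // name, stopper, period in milliseconds
  }

  @Override
  protected void chore() {
    // periodic work goes here (e.g. publishing a status metric)
  }
}

// Usage sketch: a ChoreService, like the one reporting "is enabled" above, schedules it.
// ChoreService choreService = new ChoreService("example-prefix");
// choreService.scheduleChore(new ExampleStatusChore(stopper));
```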
2024-11-16T08:37:20,914 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.HRegionServer(1482): Serving as c27dd56784bd,35969,1731746239872, RpcServer on c27dd56784bd/172.17.0.3:35969, sessionid=0x10142ca90440001 2024-11-16T08:37:20,914 DEBUG [RS:0;c27dd56784bd:35969 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T08:37:20,914 DEBUG [RS:0;c27dd56784bd:35969 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c27dd56784bd,35969,1731746239872 2024-11-16T08:37:20,915 DEBUG [RS:0;c27dd56784bd:35969 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c27dd56784bd,35969,1731746239872' 2024-11-16T08:37:20,915 DEBUG [RS:0;c27dd56784bd:35969 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T08:37:20,915 DEBUG [RS:0;c27dd56784bd:35969 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T08:37:20,916 DEBUG [RS:0;c27dd56784bd:35969 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T08:37:20,916 DEBUG [RS:0;c27dd56784bd:35969 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T08:37:20,916 DEBUG [RS:0;c27dd56784bd:35969 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c27dd56784bd,35969,1731746239872 2024-11-16T08:37:20,916 DEBUG [RS:0;c27dd56784bd:35969 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c27dd56784bd,35969,1731746239872' 2024-11-16T08:37:20,916 DEBUG [RS:0;c27dd56784bd:35969 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T08:37:20,916 DEBUG [RS:0;c27dd56784bd:35969 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T08:37:20,917 DEBUG [RS:0;c27dd56784bd:35969 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T08:37:20,917 INFO [RS:0;c27dd56784bd:35969 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T08:37:20,917 INFO [RS:0;c27dd56784bd:35969 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T08:37:20,952 WARN [c27dd56784bd:41917 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-16T08:37:21,019 INFO [RS:0;c27dd56784bd:35969 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C35969%2C1731746239872, suffix=, logDir=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872, archiveDir=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/oldWALs, maxLogs=32
2024-11-16T08:37:21,020 INFO [RS:0;c27dd56784bd:35969 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C35969%2C1731746239872.1731746241020
2024-11-16T08:37:21,030 INFO [RS:0;c27dd56784bd:35969 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746241020
2024-11-16T08:37:21,031 DEBUG [RS:0;c27dd56784bd:35969 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42253:42253),(127.0.0.1/127.0.0.1:43313:43313)]
2024-11-16T08:37:21,202 DEBUG [c27dd56784bd:41917 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-11-16T08:37:21,203 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c27dd56784bd,35969,1731746239872
2024-11-16T08:37:21,204 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c27dd56784bd,35969,1731746239872, state=OPENING
2024-11-16T08:37:21,214 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-11-16T08:37:21,224 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-16T08:37:21,225 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-16T08:37:21,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-16T08:37:21,226 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-16T08:37:21,229 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-16T08:37:21,229 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c27dd56784bd,35969,1731746239872}]
2024-11-16T08:37:21,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
2024-11-16T08:37:21,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
2024-11-16T08:37:21,391 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-16T08:37:21,394 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55229, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-16T08:37:21,404 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-16T08:37:21,404 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-16T08:37:21,413 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C35969%2C1731746239872.meta, suffix=.meta, logDir=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872, archiveDir=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/oldWALs, maxLogs=32
2024-11-16T08:37:21,414 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C35969%2C1731746239872.meta.1731746241414.meta
2024-11-16T08:37:21,445 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.meta.1731746241414.meta
2024-11-16T08:37:21,465 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43313:43313),(127.0.0.1/127.0.0.1:42253:42253)]
2024-11-16T08:37:21,489 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-11-16T08:37:21,489 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-11-16T08:37:21,489 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-11-16T08:37:21,489 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor
org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-16T08:37:21,489 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T08:37:21,490 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:37:21,490 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T08:37:21,490 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T08:37:21,492 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T08:37:21,493 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T08:37:21,494 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:21,497 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:37:21,498 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T08:37:21,498 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
1588230740 columnFamilyName ns 2024-11-16T08:37:21,499 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:21,500 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:37:21,500 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T08:37:21,501 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T08:37:21,501 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:21,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:37:21,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T08:37:21,502 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T08:37:21,502 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:21,503 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:37:21,503 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T08:37:21,504 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740 2024-11-16T08:37:21,506 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740 2024-11-16T08:37:21,507 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T08:37:21,507 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T08:37:21,507 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T08:37:21,512 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T08:37:21,513 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=750070, jitterRate=-0.04623711109161377}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T08:37:21,513 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T08:37:21,514 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731746241490Writing region info on filesystem at 1731746241490Initializing all the Stores at 1731746241491 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746241491Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746241492 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746241492Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746241492Cleaning up temporary data from old regions at 1731746241507 (+15 ms)Running coprocessor post-open hooks at 1731746241513 (+6 ms)Region opened successfully at 1731746241513 2024-11-16T08:37:21,515 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731746241391 2024-11-16T08:37:21,518 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T08:37:21,518 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T08:37:21,518 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c27dd56784bd,35969,1731746239872 2024-11-16T08:37:21,520 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c27dd56784bd,35969,1731746239872, state=OPEN 2024-11-16T08:37:21,580 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T08:37:21,580 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c27dd56784bd,35969,1731746239872 2024-11-16T08:37:21,581 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:37:21,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T08:37:21,584 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:37:21,585 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T08:37:21,585 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c27dd56784bd,35969,1731746239872 in 352 msec 2024-11-16T08:37:21,590 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T08:37:21,590 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 
in 787 msec 2024-11-16T08:37:21,591 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:37:21,591 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T08:37:21,601 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T08:37:21,601 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c27dd56784bd,35969,1731746239872, seqNum=-1] 2024-11-16T08:37:21,601 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T08:37:21,603 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35097, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T08:37:21,612 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 867 msec 2024-11-16T08:37:21,613 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731746241613, completionTime=-1 2024-11-16T08:37:21,613 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T08:37:21,613 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T08:37:21,615 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T08:37:21,615 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731746301615 2024-11-16T08:37:21,615 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731746361615 2024-11-16T08:37:21,615 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-16T08:37:21,616 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,41917,1731746239663-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:21,616 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,41917,1731746239663-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:21,616 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,41917,1731746239663-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:21,616 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c27dd56784bd:41917, period=300000, unit=MILLISECONDS is enabled. 
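The 'default' and 'hbase' namespaces that InitMetaProcedure reports creating above are built in, but the same namespace concept is exposed to clients through the Admin API. A brief sketch under the assumption of a hypothetical namespace name (the system namespaces in the log are created by the master itself, not by this kind of client call):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateNamespaceSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // "example_ns" is a hypothetical user namespace for illustration only.
      admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
    }
  }
}
```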
2024-11-16T08:37:21,616 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:21,616 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:21,619 DEBUG [master/c27dd56784bd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T08:37:21,621 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.670sec 2024-11-16T08:37:21,621 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T08:37:21,621 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T08:37:21,621 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T08:37:21,621 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T08:37:21,621 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T08:37:21,621 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,41917,1731746239663-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T08:37:21,621 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,41917,1731746239663-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T08:37:21,625 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T08:37:21,625 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T08:37:21,625 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,41917,1731746239663-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T08:37:21,712 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55b48cd0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:37:21,712 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c27dd56784bd,41917,-1 for getting cluster id 2024-11-16T08:37:21,712 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T08:37:21,717 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '367a47ba-4ad0-4c9a-89a6-cc213898aa7f' 2024-11-16T08:37:21,717 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T08:37:21,718 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "367a47ba-4ad0-4c9a-89a6-cc213898aa7f" 2024-11-16T08:37:21,718 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5493648b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:37:21,718 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c27dd56784bd,41917,-1] 2024-11-16T08:37:21,718 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T08:37:21,724 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:37:21,726 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55028, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T08:37:21,727 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@419bc4ca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:37:21,728 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T08:37:21,729 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c27dd56784bd,35969,1731746239872, seqNum=-1] 2024-11-16T08:37:21,730 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T08:37:21,732 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37928, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T08:37:21,734 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c27dd56784bd,41917,1731746239663 2024-11-16T08:37:21,735 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:37:21,738 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T08:37:21,738 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-16T08:37:21,738 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-16T08:37:21,738 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T08:37:21,739 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is c27dd56784bd,41917,1731746239663 2024-11-16T08:37:21,739 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@8e26c8a 2024-11-16T08:37:21,739 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T08:37:21,742 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55036, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T08:37:21,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41917 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T08:37:21,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41917 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
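The two TableDescriptorChecker warnings above refer to the keys "hbase.hregion.max.filesize" and "hbase.hregion.memstore.flush.size"; the tiny values (786432 and 8192 bytes) are what a log-rolling test wants, since they force frequent flushes and WAL rolls. A minimal sketch of how such values might be set on a test Configuration, taking the numbers straight from the warnings (where the test actually sets them is not shown in this log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TinyRegionSizesSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Deliberately small values, matching the warnings above; a production cluster
    // would keep the defaults to avoid over-splitting and constant flushing.
    conf.setLong("hbase.hregion.max.filesize", 786432L);
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L);
    System.out.println("max.filesize=" + conf.getLong("hbase.hregion.max.filesize", -1L));
  }
}
```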
2024-11-16T08:37:21,743 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41917 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T08:37:21,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41917 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-16T08:37:21,746 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T08:37:21,747 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:21,747 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41917 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-16T08:37:21,749 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T08:37:21,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41917 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T08:37:21,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36335 is added to blk_1073741835_1011 (size=395) 2024-11-16T08:37:21,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39999 is added to blk_1073741835_1011 (size=395) 2024-11-16T08:37:21,790 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c8f15dc6acba8171e6b5a3f4c7ba87c7, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca 2024-11-16T08:37:21,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39999 is added to blk_1073741836_1012 (size=78) 2024-11-16T08:37:21,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36335 is added to blk_1073741836_1012 (size=78) 2024-11-16T08:37:21,814 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:37:21,814 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing c8f15dc6acba8171e6b5a3f4c7ba87c7, disabling compactions & flushes 2024-11-16T08:37:21,814 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7. 2024-11-16T08:37:21,814 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7. 2024-11-16T08:37:21,814 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7. after waiting 0 ms 2024-11-16T08:37:21,814 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7. 2024-11-16T08:37:21,815 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7. 2024-11-16T08:37:21,815 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for c8f15dc6acba8171e6b5a3f4c7ba87c7: Waiting for close lock at 1731746241814Disabling compacts and flushes for region at 1731746241814Disabling writes for close at 1731746241814Writing region close event to WAL at 1731746241814Closed at 1731746241814 2024-11-16T08:37:21,816 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T08:37:21,817 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731746241817"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731746241817"}]},"ts":"1731746241817"} 2024-11-16T08:37:21,822 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
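The descriptor logged for the create request above (a single 'info' family with VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 64KB) maps onto the client-side builder API roughly as follows. This is a sketch of an equivalent client call, not the code the test itself runs:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1)                 // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
            .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536 B (64KB)'
            .build())
        .build();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(td); // drives the CreateTableProcedure seen in the log
    }
  }
}
```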
2024-11-16T08:37:21,824 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T08:37:21,825 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731746241824"}]},"ts":"1731746241824"} 2024-11-16T08:37:21,827 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-16T08:37:21,828 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=c8f15dc6acba8171e6b5a3f4c7ba87c7, ASSIGN}] 2024-11-16T08:37:21,831 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=c8f15dc6acba8171e6b5a3f4c7ba87c7, ASSIGN 2024-11-16T08:37:21,836 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=c8f15dc6acba8171e6b5a3f4c7ba87c7, ASSIGN; state=OFFLINE, location=c27dd56784bd,35969,1731746239872; forceNewPlan=false, retain=false 2024-11-16T08:37:21,988 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c8f15dc6acba8171e6b5a3f4c7ba87c7, regionState=OPENING, regionLocation=c27dd56784bd,35969,1731746239872 2024-11-16T08:37:21,991 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=c8f15dc6acba8171e6b5a3f4c7ba87c7, ASSIGN because future has completed 2024-11-16T08:37:21,992 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c8f15dc6acba8171e6b5a3f4c7ba87c7, server=c27dd56784bd,35969,1731746239872}] 2024-11-16T08:37:22,164 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7. 
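The region for the new table is being assigned and opened in the entries above and below. Once it is online, a log-rolling test drives it with client writes, each of which is appended to the region server's WAL (the FSHLog created earlier) before being acknowledged. As a sketch with hypothetical row and qualifier names (the rows the test actually writes are not shown in this log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteRowSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))) {
      // Hypothetical row key and qualifier; the 'info' family matches the table descriptor above.
      Put put = new Put(Bytes.toBytes("row-0"));
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value-0"));
      table.put(put);
    }
  }
}
```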
2024-11-16T08:37:22,164 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c8f15dc6acba8171e6b5a3f4c7ba87c7, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7.', STARTKEY => '', ENDKEY => ''} 2024-11-16T08:37:22,165 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart c8f15dc6acba8171e6b5a3f4c7ba87c7 2024-11-16T08:37:22,165 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:37:22,165 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c8f15dc6acba8171e6b5a3f4c7ba87c7 2024-11-16T08:37:22,165 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c8f15dc6acba8171e6b5a3f4c7ba87c7 2024-11-16T08:37:22,174 INFO [StoreOpener-c8f15dc6acba8171e6b5a3f4c7ba87c7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c8f15dc6acba8171e6b5a3f4c7ba87c7 2024-11-16T08:37:22,185 INFO [StoreOpener-c8f15dc6acba8171e6b5a3f4c7ba87c7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c8f15dc6acba8171e6b5a3f4c7ba87c7 columnFamilyName info 2024-11-16T08:37:22,185 DEBUG [StoreOpener-c8f15dc6acba8171e6b5a3f4c7ba87c7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:22,186 INFO [StoreOpener-c8f15dc6acba8171e6b5a3f4c7ba87c7-1 {}] regionserver.HStore(327): Store=c8f15dc6acba8171e6b5a3f4c7ba87c7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:37:22,186 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c8f15dc6acba8171e6b5a3f4c7ba87c7 2024-11-16T08:37:22,187 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/c8f15dc6acba8171e6b5a3f4c7ba87c7 2024-11-16T08:37:22,188 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/c8f15dc6acba8171e6b5a3f4c7ba87c7 2024-11-16T08:37:22,189 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c8f15dc6acba8171e6b5a3f4c7ba87c7 2024-11-16T08:37:22,189 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c8f15dc6acba8171e6b5a3f4c7ba87c7 2024-11-16T08:37:22,191 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c8f15dc6acba8171e6b5a3f4c7ba87c7 2024-11-16T08:37:22,205 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/c8f15dc6acba8171e6b5a3f4c7ba87c7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T08:37:22,206 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c8f15dc6acba8171e6b5a3f4c7ba87c7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=854474, jitterRate=0.08652035892009735}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T08:37:22,206 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c8f15dc6acba8171e6b5a3f4c7ba87c7 2024-11-16T08:37:22,206 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c8f15dc6acba8171e6b5a3f4c7ba87c7: Running coprocessor pre-open hook at 1731746242165Writing region info on filesystem at 1731746242165Initializing all the Stores at 1731746242173 (+8 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746242173Cleaning up temporary data from old regions at 1731746242189 (+16 ms)Running coprocessor post-open hooks at 1731746242206 (+17 ms)Region opened successfully at 1731746242206 2024-11-16T08:37:22,208 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7., pid=6, masterSystemTime=1731746242148 2024-11-16T08:37:22,212 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c8f15dc6acba8171e6b5a3f4c7ba87c7, regionState=OPEN, openSeqNum=2, 
regionLocation=c27dd56784bd,35969,1731746239872 2024-11-16T08:37:22,213 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7. 2024-11-16T08:37:22,213 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7. 2024-11-16T08:37:22,217 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c8f15dc6acba8171e6b5a3f4c7ba87c7, server=c27dd56784bd,35969,1731746239872 because future has completed 2024-11-16T08:37:22,223 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T08:37:22,223 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c8f15dc6acba8171e6b5a3f4c7ba87c7, server=c27dd56784bd,35969,1731746239872 in 227 msec 2024-11-16T08:37:22,227 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T08:37:22,227 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=c8f15dc6acba8171e6b5a3f4c7ba87c7, ASSIGN in 396 msec 2024-11-16T08:37:22,229 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T08:37:22,229 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731746242229"}]},"ts":"1731746242229"} 2024-11-16T08:37:22,233 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-16T08:37:22,237 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T08:37:22,241 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 494 msec 2024-11-16T08:37:22,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:22,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:23,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:23,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:24,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:24,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:24,923 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T08:37:24,948 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:24,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:24,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:24,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:24,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:24,951 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:24,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:24,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:24,958 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:24,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:25,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:25,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:26,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:26,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:26,868 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T08:37:26,868 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-16T08:37:27,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:27,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:28,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:28,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:29,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:29,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:29,419 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T08:37:29,419 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T08:37:29,419 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-16T08:37:29,419 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-16T08:37:29,420 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T08:37:29,420 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T08:37:29,420 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-16T08:37:29,420 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-16T08:37:30,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:30,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:31,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:31,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:31,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41917 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T08:37:31,793 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-16T08:37:31,793 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-16T08:37:31,796 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-16T08:37:31,796 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7. 2024-11-16T08:37:31,800 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7., hostname=c27dd56784bd,35969,1731746239872, seqNum=2] 2024-11-16T08:37:32,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:32,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:33,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:33,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:33,803 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746241020 2024-11-16T08:37:33,804 WARN [ResponseProcessor for block BP-921546855-172.17.0.3-1731746236475:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-921546855-172.17.0.3-1731746236475:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:33,804 WARN [ResponseProcessor for block BP-921546855-172.17.0.3-1731746236475:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-921546855-172.17.0.3-1731746236475:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-921546855-172.17.0.3-1731746236475:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:39999,DS-e17595d2-9167-4def-b028-0973e110976b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T08:37:33,804 WARN [DataStreamer for file /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.meta.1731746241414.meta block BP-921546855-172.17.0.3-1731746236475:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-921546855-172.17.0.3-1731746236475:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39999,DS-e17595d2-9167-4def-b028-0973e110976b,DISK], DatanodeInfoWithStorage[127.0.0.1:36335,DS-9a89e58d-9857-4b31-be9b-41364b271d9b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39999,DS-e17595d2-9167-4def-b028-0973e110976b,DISK]) is bad. 2024-11-16T08:37:33,805 WARN [DataStreamer for file /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746241020 block BP-921546855-172.17.0.3-1731746236475:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-921546855-172.17.0.3-1731746236475:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36335,DS-9a89e58d-9857-4b31-be9b-41364b271d9b,DISK], DatanodeInfoWithStorage[127.0.0.1:39999,DS-e17595d2-9167-4def-b028-0973e110976b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39999,DS-e17595d2-9167-4def-b028-0973e110976b,DISK]) is bad. 2024-11-16T08:37:33,805 WARN [PacketResponder: BP-921546855-172.17.0.3-1731746236475:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39999] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T08:37:33,806 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_111946686_22 at /127.0.0.1:45082 [Receiving block BP-921546855-172.17.0.3-1731746236475:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36335:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45082 dst: /127.0.0.1:36335 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:37:33,806 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_111946686_22 at /127.0.0.1:43490 [Receiving block BP-921546855-172.17.0.3-1731746236475:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39999:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43490 dst: /127.0.0.1:39999 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:37:33,806 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_111946686_22 at /127.0.0.1:45086 [Receiving block BP-921546855-172.17.0.3-1731746236475:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36335:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45086 dst: /127.0.0.1:36335 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:37:33,808 WARN [ResponseProcessor for block BP-921546855-172.17.0.3-1731746236475:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-921546855-172.17.0.3-1731746236475:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-921546855-172.17.0.3-1731746236475:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:39999,DS-e17595d2-9167-4def-b028-0973e110976b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:33,808 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_111946686_22 at /127.0.0.1:43502 [Receiving block BP-921546855-172.17.0.3-1731746236475:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39999:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43502 dst: /127.0.0.1:39999 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:37:33,808 WARN [DataStreamer for file /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/WALs/c27dd56784bd,41917,1731746239663/c27dd56784bd%2C41917%2C1731746239663.1731746240455 block BP-921546855-172.17.0.3-1731746236475:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-921546855-172.17.0.3-1731746236475:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36335,DS-9a89e58d-9857-4b31-be9b-41364b271d9b,DISK], DatanodeInfoWithStorage[127.0.0.1:39999,DS-e17595d2-9167-4def-b028-0973e110976b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39999,DS-e17595d2-9167-4def-b028-0973e110976b,DISK]) is bad. 2024-11-16T08:37:33,808 WARN [PacketResponder: BP-921546855-172.17.0.3-1731746236475:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39999] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:37:33,809 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-178266705_22 at /127.0.0.1:45058 [Receiving block BP-921546855-172.17.0.3-1731746236475:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36335:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45058 dst: /127.0.0.1:36335 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:37:33,809 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-178266705_22 at /127.0.0.1:43466 [Receiving block BP-921546855-172.17.0.3-1731746236475:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39999:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43466 dst: /127.0.0.1:39999 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:37:33,845 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7a1758ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:37:33,845 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6144f613{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:37:33,845 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:37:33,845 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60deb4a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:37:33,845 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50b10539{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/hadoop.log.dir/,STOPPED} 2024-11-16T08:37:33,846 WARN [BP-921546855-172.17.0.3-1731746236475 heartbeating to localhost/127.0.0.1:40709 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T08:37:33,846 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T08:37:33,847 WARN [BP-921546855-172.17.0.3-1731746236475 heartbeating to localhost/127.0.0.1:40709 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-921546855-172.17.0.3-1731746236475 (Datanode Uuid 6ca2f07c-8fe0-461e-81df-db1309519ed2) service to localhost/127.0.0.1:40709 2024-11-16T08:37:33,847 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T08:37:33,847 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/cluster_5a80eb13-0e85-3694-9134-074b45462566/data/data3/current/BP-921546855-172.17.0.3-1731746236475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:37:33,848 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T08:37:33,848 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/cluster_5a80eb13-0e85-3694-9134-074b45462566/data/data4/current/BP-921546855-172.17.0.3-1731746236475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:37:33,859 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:37:33,863 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:37:33,872 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:37:33,872 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:37:33,872 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T08:37:33,873 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f8a0d0d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:37:33,873 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57d5f4b3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:37:33,978 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@67d70e61{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/java.io.tmpdir/jetty-localhost-37905-hadoop-hdfs-3_4_1-tests_jar-_-any-14926087552604008291/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:37:33,979 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5f1a012{HTTP/1.1, 
(http/1.1)}{localhost:37905} 2024-11-16T08:37:33,979 INFO [Time-limited test {}] server.Server(415): Started @172215ms 2024-11-16T08:37:33,980 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:37:33,998 WARN [ResponseProcessor for block BP-921546855-172.17.0.3-1731746236475:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-921546855-172.17.0.3-1731746236475:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:33,998 WARN [ResponseProcessor for block BP-921546855-172.17.0.3-1731746236475:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-921546855-172.17.0.3-1731746236475:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:33,998 WARN [ResponseProcessor for block BP-921546855-172.17.0.3-1731746236475:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-921546855-172.17.0.3-1731746236475:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:33,999 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_111946686_22 at /127.0.0.1:51740 [Receiving block BP-921546855-172.17.0.3-1731746236475:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36335:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51740 dst: /127.0.0.1:36335 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] 
at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:37:33,999 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_111946686_22 at /127.0.0.1:51738 [Receiving block BP-921546855-172.17.0.3-1731746236475:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36335:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51738 dst: /127.0.0.1:36335 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:37:34,000 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-178266705_22 at /127.0.0.1:51728 [Receiving block BP-921546855-172.17.0.3-1731746236475:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36335:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51728 dst: /127.0.0.1:36335 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T08:37:34,001 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ea5a7a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:37:34,001 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@47ffea33{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:37:34,002 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:37:34,002 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62bbed65{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:37:34,002 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a48749e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/hadoop.log.dir/,STOPPED} 2024-11-16T08:37:34,003 WARN [BP-921546855-172.17.0.3-1731746236475 heartbeating to localhost/127.0.0.1:40709 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T08:37:34,003 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T08:37:34,003 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T08:37:34,003 WARN [BP-921546855-172.17.0.3-1731746236475 heartbeating to localhost/127.0.0.1:40709 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-921546855-172.17.0.3-1731746236475 (Datanode Uuid 882be36c-b551-492c-9d5c-f23c55439f3e) service to localhost/127.0.0.1:40709 2024-11-16T08:37:34,004 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/cluster_5a80eb13-0e85-3694-9134-074b45462566/data/data1/current/BP-921546855-172.17.0.3-1731746236475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:37:34,004 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/cluster_5a80eb13-0e85-3694-9134-074b45462566/data/data2/current/BP-921546855-172.17.0.3-1731746236475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:37:34,005 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T08:37:34,014 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:37:34,018 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:37:34,019 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:37:34,019 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:37:34,019 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T08:37:34,020 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49f94f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:37:34,020 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64685bd7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:37:34,133 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@652ca842{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/java.io.tmpdir/jetty-localhost-36517-hadoop-hdfs-3_4_1-tests_jar-_-any-11092096267612132903/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:37:34,133 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d802677{HTTP/1.1, (http/1.1)}{localhost:36517} 2024-11-16T08:37:34,133 INFO [Time-limited test {}] server.Server(415): Started @172369ms 2024-11-16T08:37:34,135 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:37:34,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:34,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:34,692 WARN [Thread-1323 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T08:37:34,695 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4632870db28c9980 with lease ID 0x55f22628a93a7f52: from storage DS-e17595d2-9167-4def-b028-0973e110976b node DatanodeRegistration(127.0.0.1:45559, datanodeUuid=6ca2f07c-8fe0-461e-81df-db1309519ed2, infoPort=45839, infoSecurePort=0, ipcPort=42925, storageInfo=lv=-57;cid=testClusterID;nsid=203180900;c=1731746236475), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:37:34,695 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4632870db28c9980 with lease ID 0x55f22628a93a7f52: from storage DS-e9c62e18-b488-4404-a6cb-911672c6b333 node DatanodeRegistration(127.0.0.1:45559, datanodeUuid=6ca2f07c-8fe0-461e-81df-db1309519ed2, infoPort=45839, infoSecurePort=0, ipcPort=42925, storageInfo=lv=-57;cid=testClusterID;nsid=203180900;c=1731746236475), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T08:37:34,867 WARN [Thread-1343 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T08:37:34,869 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2792d81b7280e300 with lease ID 0x55f22628a93a7f53: from storage DS-9a89e58d-9857-4b31-be9b-41364b271d9b node DatanodeRegistration(127.0.0.1:45673, datanodeUuid=882be36c-b551-492c-9d5c-f23c55439f3e, infoPort=41695, infoSecurePort=0, ipcPort=39971, storageInfo=lv=-57;cid=testClusterID;nsid=203180900;c=1731746236475), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:37:34,869 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2792d81b7280e300 with lease ID 0x55f22628a93a7f53: from storage DS-7a077a4e-530c-4bb4-a3e1-aaa2a4f98717 node DatanodeRegistration(127.0.0.1:45673, datanodeUuid=882be36c-b551-492c-9d5c-f23c55439f3e, infoPort=41695, infoSecurePort=0, ipcPort=39971, storageInfo=lv=-57;cid=testClusterID;nsid=203180900;c=1731746236475), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:37:35,154 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-16T08:37:35,156 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-16T08:37:35,158 ERROR [FSHLog-0-hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca-prefix:c27dd56784bd,35969,1731746239872 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36335,DS-9a89e58d-9857-4b31-be9b-41364b271d9b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T08:37:35,158 WARN [FSHLog-0-hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca-prefix:c27dd56784bd,35969,1731746239872 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36335,DS-9a89e58d-9857-4b31-be9b-41364b271d9b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:35,158 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c27dd56784bd%2C35969%2C1731746239872:(num 1731746241020) roll requested 2024-11-16T08:37:35,158 INFO [regionserver/c27dd56784bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C35969%2C1731746239872.1731746255158 2024-11-16T08:37:35,163 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746241020 newFile=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746255158 2024-11-16T08:37:35,164 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:35,164 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:35,164 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:35,164 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:35,164 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:35,164 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746241020 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746255158 2024-11-16T08:37:35,164 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36335,DS-9a89e58d-9857-4b31-be9b-41364b271d9b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
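The roll recorded above is initiated internally by the region server's log roller after the failed append; for comparison, a WAL roll can also be requested explicitly through the public Admin API. A hedged sketch of that manual path, not what this test does; the connection setup is assumed, and the server name values are copied from the WAL path in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ManualWalRoll {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Roll the WAL of one region server; host, port and start code here
          // mirror the "c27dd56784bd,35969,1731746239872" server seen above.
          ServerName rs = ServerName.valueOf("c27dd56784bd", 35969, 1731746239872L);
          admin.rollWALWriter(rs);
        }
      }
    }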
2024-11-16T08:37:35,165 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36335,DS-9a89e58d-9857-4b31-be9b-41364b271d9b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:35,165 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746241020 2024-11-16T08:37:35,165 WARN [IPC Server handler 0 on default port 40709 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746241020 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-16T08:37:35,165 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746241020 after 0ms 2024-11-16T08:37:35,167 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41695:41695),(127.0.0.1/127.0.0.1:45839:45839)] 2024-11-16T08:37:35,167 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746241020 is not closed yet, will try archiving it next time 2024-11-16T08:37:35,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:35,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:36,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:36,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:37,170 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-16T08:37:37,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:37,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:38,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:38,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:39,166 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746241020 after 4001ms 2024-11-16T08:37:39,173 WARN [ResponseProcessor for block BP-921546855-172.17.0.3-1731746236475:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-921546855-172.17.0.3-1731746236475:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:39,173 WARN [DataStreamer for file /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746255158 block BP-921546855-172.17.0.3-1731746236475:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-921546855-172.17.0.3-1731746236475:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45673,DS-9a89e58d-9857-4b31-be9b-41364b271d9b,DISK], DatanodeInfoWithStorage[127.0.0.1:45559,DS-e17595d2-9167-4def-b028-0973e110976b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45673,DS-9a89e58d-9857-4b31-be9b-41364b271d9b,DISK]) is bad. 2024-11-16T08:37:39,173 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_111946686_22 at /127.0.0.1:35100 [Receiving block BP-921546855-172.17.0.3-1731746236475:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45673:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35100 dst: /127.0.0.1:45673 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:37:39,173 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_111946686_22 at /127.0.0.1:60916 [Receiving block BP-921546855-172.17.0.3-1731746236475:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45559:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60916 dst: /127.0.0.1:45559 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
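The lease recovery entries above (attempt=0 failing immediately while the NameNode reports "Lease recovery is in progress", attempt=1 succeeding after roughly 4 s) follow the usual recoverLease polling pattern. A rough sketch of that pattern against a plain DistributedFileSystem, not the RecoverLeaseFSUtils code itself; the URI and WAL path are taken from the log, the fixed one-second back-off is an assumption:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoveryRetry {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:40709"), conf);
        Path wal = new Path("/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/"
            + "WALs/c27dd56784bd,35969,1731746239872/"
            + "c27dd56784bd%2C35969%2C1731746239872.1731746241020");
        // recoverLease() returns false while recovery is still in progress on the NameNode,
        // so poll until it returns true (attempt=1 succeeded after ~4001ms in the run above).
        while (!dfs.recoverLease(wal)) {
          Thread.sleep(1000L); // assumed back-off; the real utility paces and logs each attempt
        }
      }
    }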
2024-11-16T08:37:39,208 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@652ca842{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:37:39,208 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d802677{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:37:39,208 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:37:39,208 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64685bd7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:37:39,208 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49f94f8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/hadoop.log.dir/,STOPPED} 2024-11-16T08:37:39,209 WARN [BP-921546855-172.17.0.3-1731746236475 heartbeating to localhost/127.0.0.1:40709 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T08:37:39,210 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T08:37:39,210 WARN [BP-921546855-172.17.0.3-1731746236475 heartbeating to localhost/127.0.0.1:40709 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-921546855-172.17.0.3-1731746236475 (Datanode Uuid 882be36c-b551-492c-9d5c-f23c55439f3e) service to localhost/127.0.0.1:40709 2024-11-16T08:37:39,210 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T08:37:39,210 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/cluster_5a80eb13-0e85-3694-9134-074b45462566/data/data1/current/BP-921546855-172.17.0.3-1731746236475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:37:39,210 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/cluster_5a80eb13-0e85-3694-9134-074b45462566/data/data2/current/BP-921546855-172.17.0.3-1731746236475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:37:39,211 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T08:37:39,218 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:37:39,221 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:37:39,222 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:37:39,222 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:37:39,222 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T08:37:39,222 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a0cdfff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:37:39,223 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c9115f6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:37:39,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:39,362 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@e9f8962{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/java.io.tmpdir/jetty-localhost-39203-hadoop-hdfs-3_4_1-tests_jar-_-any-1618797810926707326/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:37:39,362 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a79eeb5{HTTP/1.1, (http/1.1)}{localhost:39203} 2024-11-16T08:37:39,362 INFO [Time-limited test {}] server.Server(415): Started @177598ms 2024-11-16T08:37:39,364 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:37:39,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:39,417 WARN [ResponseProcessor for block BP-921546855-172.17.0.3-1731746236475:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-921546855-172.17.0.3-1731746236475:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:39,417 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_111946686_22 at /127.0.0.1:60944 [Receiving block BP-921546855-172.17.0.3-1731746236475:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45559:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60944 dst: /127.0.0.1:45559 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
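The repeated "Failed invocation ... InvocationTargetException" entries above come from a reflective isFileClosed probe: the real failure (java.io.IOException "Filesystem closed") arrives wrapped inside the reflection exception, which is why each trace carries a Caused by. A simplified sketch of that pattern, not the RecoverLeaseFSUtils source; the helper class and its return-value convention are assumptions:

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class IsFileClosedProbe {
      // Returns true only when the file is known to be closed; false when the probe fails.
      static boolean probe(FileSystem fs, Path file) {
        try {
          // isFileClosed(Path) is looked up reflectively so the caller also works against
          // FileSystem implementations that do not expose the method.
          Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) isFileClosed.invoke(fs, file);
        } catch (NoSuchMethodException | IllegalAccessException e) {
          return false; // method missing or inaccessible on this implementation
        } catch (InvocationTargetException e) {
          // The underlying IOException ("Filesystem closed" above) is wrapped here.
          return false;
        }
      }
    }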
2024-11-16T08:37:39,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@67d70e61{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:37:39,430 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5f1a012{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:37:39,430 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:37:39,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57d5f4b3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:37:39,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f8a0d0d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/hadoop.log.dir/,STOPPED} 2024-11-16T08:37:39,432 WARN [BP-921546855-172.17.0.3-1731746236475 heartbeating to localhost/127.0.0.1:40709 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T08:37:39,432 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-16T08:37:39,432 WARN [BP-921546855-172.17.0.3-1731746236475 heartbeating to localhost/127.0.0.1:40709 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-921546855-172.17.0.3-1731746236475 (Datanode Uuid 6ca2f07c-8fe0-461e-81df-db1309519ed2) service to localhost/127.0.0.1:40709 2024-11-16T08:37:39,432 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T08:37:39,432 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/cluster_5a80eb13-0e85-3694-9134-074b45462566/data/data3/current/BP-921546855-172.17.0.3-1731746236475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:37:39,433 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/cluster_5a80eb13-0e85-3694-9134-074b45462566/data/data4/current/BP-921546855-172.17.0.3-1731746236475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:37:39,433 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T08:37:39,440 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:37:39,444 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:37:39,444 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:37:39,445 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:37:39,445 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T08:37:39,445 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21ffcd24{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:37:39,446 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16fecb8d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:37:39,603 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39ebff59{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/java.io.tmpdir/jetty-localhost-40985-hadoop-hdfs-3_4_1-tests_jar-_-any-3739799285443182704/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:37:39,604 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@160f7ba9{HTTP/1.1, (http/1.1)}{localhost:40985} 2024-11-16T08:37:39,604 INFO [Time-limited test {}] server.Server(415): Started @177840ms 2024-11-16T08:37:39,605 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:37:39,890 WARN [Thread-1397 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T08:37:39,892 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x63a298c77b24cce with lease ID 0x55f22628a93a7f54: from storage DS-9a89e58d-9857-4b31-be9b-41364b271d9b node DatanodeRegistration(127.0.0.1:34255, datanodeUuid=882be36c-b551-492c-9d5c-f23c55439f3e, infoPort=39591, infoSecurePort=0, ipcPort=44399, storageInfo=lv=-57;cid=testClusterID;nsid=203180900;c=1731746236475), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:37:39,892 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x63a298c77b24cce with lease ID 0x55f22628a93a7f54: from storage DS-7a077a4e-530c-4bb4-a3e1-aaa2a4f98717 node DatanodeRegistration(127.0.0.1:34255, datanodeUuid=882be36c-b551-492c-9d5c-f23c55439f3e, infoPort=39591, infoSecurePort=0, ipcPort=44399, storageInfo=lv=-57;cid=testClusterID;nsid=203180900;c=1731746236475), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:37:40,110 WARN [Thread-1417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T08:37:40,112 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-16T08:37:40,113 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeb7944d83d8d3b63 with lease ID 0x55f22628a93a7f55: from storage DS-e17595d2-9167-4def-b028-0973e110976b node DatanodeRegistration(127.0.0.1:32985, datanodeUuid=6ca2f07c-8fe0-461e-81df-db1309519ed2, infoPort=44999, infoSecurePort=0, ipcPort=45205, storageInfo=lv=-57;cid=testClusterID;nsid=203180900;c=1731746236475), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:37:40,113 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xeb7944d83d8d3b63 with lease ID 0x55f22628a93a7f55: from storage DS-e9c62e18-b488-4404-a6cb-911672c6b333 node DatanodeRegistration(127.0.0.1:32985, datanodeUuid=6ca2f07c-8fe0-461e-81df-db1309519ed2, infoPort=44999, infoSecurePort=0, ipcPort=45205, storageInfo=lv=-57;cid=testClusterID;nsid=203180900;c=1731746236475), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T08:37:40,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:40,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:40,640 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-16T08:37:40,642 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-16T08:37:40,643 ERROR [FSHLog-0-hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca-prefix:c27dd56784bd,35969,1731746239872 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45559,DS-e17595d2-9167-4def-b028-0973e110976b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T08:37:40,643 WARN [FSHLog-0-hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca-prefix:c27dd56784bd,35969,1731746239872 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45559,DS-e17595d2-9167-4def-b028-0973e110976b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:40,644 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c27dd56784bd%2C35969%2C1731746239872:(num 1731746255158) roll requested 2024-11-16T08:37:40,644 INFO [regionserver/c27dd56784bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C35969%2C1731746239872.1731746260644 2024-11-16T08:37:40,651 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746255158 newFile=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746260644 2024-11-16T08:37:40,651 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:40,651 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:40,651 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:40,651 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:40,651 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:40,652 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746255158 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746260644 2024-11-16T08:37:40,652 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45559,DS-e17595d2-9167-4def-b028-0973e110976b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T08:37:40,652 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45559,DS-e17595d2-9167-4def-b028-0973e110976b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:40,652 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746255158 2024-11-16T08:37:40,652 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39591:39591),(127.0.0.1/127.0.0.1:44999:44999)] 2024-11-16T08:37:40,652 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746255158 is not closed yet, will try archiving it next time 2024-11-16T08:37:40,652 WARN [IPC Server handler 4 on default port 40709 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746255158 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-16T08:37:40,653 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746255158 after 1ms 2024-11-16T08:37:41,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:41,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:41,892 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-16T08:37:42,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:42,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:42,654 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C35969%2C1731746239872.1731746262653 2024-11-16T08:37:42,662 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746260644 newFile=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746262653 2024-11-16T08:37:42,662 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:42,663 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:42,663 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:42,663 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:42,663 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:42,663 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746260644 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746262653 2024-11-16T08:37:42,664 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44999:44999),(127.0.0.1/127.0.0.1:39591:39591)] 2024-11-16T08:37:42,664 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746255158 is not closed yet, will try archiving it next time 2024-11-16T08:37:42,664 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746260644 is not closed yet, will try archiving it next time 2024-11-16T08:37:42,665 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746241020 2024-11-16T08:37:42,665 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746241020 2024-11-16T08:37:42,665 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746241020 after 0ms 2024-11-16T08:37:42,665 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746241020 2024-11-16T08:37:42,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32985 is added to blk_1073741838_1019 (size=1264) 2024-11-16T08:37:42,666 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741838_1019 (size=1264) 2024-11-16T08:37:42,669 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746255158 is not closed yet, will try archiving it next time 2024-11-16T08:37:42,679 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731746242207/Put/vlen=218/seqid=0] 2024-11-16T08:37:42,679 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731746251802/Put/vlen=1045/seqid=0] 2024-11-16T08:37:42,679 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746241020 2024-11-16T08:37:42,679 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746255158 2024-11-16T08:37:42,679 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746255158 2024-11-16T08:37:42,680 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746255158 after 1ms 2024-11-16T08:37:42,680 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746255158 2024-11-16T08:37:42,685 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731746255158/Put/vlen=1045/seqid=0] 2024-11-16T08:37:42,686 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731746257171/Put/vlen=1045/seqid=0] 2024-11-16T08:37:42,686 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746255158 2024-11-16T08:37:42,686 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746260644 2024-11-16T08:37:42,686 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746260644 2024-11-16T08:37:42,686 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746260644 after 0ms 2024-11-16T08:37:42,686 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746260644 2024-11-16T08:37:42,691 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731746260643/Put/vlen=1045/seqid=0] 2024-11-16T08:37:42,692 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746262653 2024-11-16T08:37:42,692 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746262653 2024-11-16T08:37:42,692 WARN [IPC Server handler 2 on default port 40709 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746262653 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-16T08:37:42,693 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746262653 after 1ms 2024-11-16T08:37:43,114 WARN [ResponseProcessor for block BP-921546855-172.17.0.3-1731746236475:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-921546855-172.17.0.3-1731746236475:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:43,114 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-178266705_22 at /127.0.0.1:49954 [Receiving block BP-921546855-172.17.0.3-1731746236475:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:32985:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49954 dst: /127.0.0.1:32985 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:32985 remote=/127.0.0.1:49954]. Total timeout mills is 60000, 59547 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:37:43,114 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-178266705_22 at /127.0.0.1:48588 [Receiving block BP-921546855-172.17.0.3-1731746236475:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:34255:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48588 dst: /127.0.0.1:34255 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:37:43,115 WARN [DataStreamer for file /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746262653 block BP-921546855-172.17.0.3-1731746236475:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-921546855-172.17.0.3-1731746236475:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32985,DS-e17595d2-9167-4def-b028-0973e110976b,DISK], DatanodeInfoWithStorage[127.0.0.1:34255,DS-9a89e58d-9857-4b31-be9b-41364b271d9b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32985,DS-e17595d2-9167-4def-b028-0973e110976b,DISK]) is bad. 
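The DataXceiver EOF errors and the pipeline error-recovery warning above are the datanode-side and client-side views of the same event: a datanode serving block blk_1073741839_1021 dropped out of the pipeline while the WAL stream was still open, and with no replacement available the client eventually hits the "All datanodes [...] are bad. Aborting..." failure seen earlier. A hedged client-side sketch of a write plus hflush against a small cluster; the path is made up, the port is borrowed from the log, and the replace-datanode-on-failure settings are standard HDFS client keys shown only to indicate where that recovery behavior is tuned:

```java
import java.net.URI;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PipelineFailureSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Standard HDFS client keys controlling whether a failed datanode in the
    // write pipeline gets replaced; on a two-datanode mini-cluster there is
    // often no replacement, so best-effort avoids failing the stream outright.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

    // Port taken from the log's NameNode (hdfs://localhost:40709); path is illustrative.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40709"), conf);
    Path wal = new Path("/tmp/example-wal");
    try (FSDataOutputStream out = fs.create(wal, (short) 2)) {
      out.write("row1005/info".getBytes(StandardCharsets.UTF_8));
      // hflush pushes data through the datanode pipeline; once every datanode in
      // the pipeline has failed, the DataStreamer aborts and the stream throws
      // the same "All datanodes [...] are bad. Aborting..." IOException.
      out.hflush();
    }
  }
}
```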
2024-11-16T08:37:43,116 WARN [DataStreamer for file /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746262653 block BP-921546855-172.17.0.3-1731746236475:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-921546855-172.17.0.3-1731746236475:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
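Interleaved with these failures, the log walks through lease recovery on the abandoned WAL files: attempt=0 returns immediately because the NameNode reports "Lease recovery is in progress" and schedules block recovery (RecoveryId = 1020 and 1022), and attempt=1 succeeds roughly four seconds later ("after 4002ms"). A minimal sketch of that client-side polling pattern, assuming a DistributedFileSystem handle; the method name, pause, and timeout are illustrative rather than the actual RecoverLeaseFSUtils logic:

```java
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryPoll {
  /**
   * Hypothetical polling helper. DistributedFileSystem#recoverLease asks the
   * NameNode to start lease recovery and returns true once the file is closed;
   * it returns false while block recovery (e.g. RecoveryId = 1020 above) is
   * still in progress.
   */
  static boolean waitForLeaseRecovery(DistributedFileSystem dfs, Path path, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    for (int attempt = 0; System.currentTimeMillis() < deadline; attempt++) {
      if (dfs.recoverLease(path)) {
        System.out.println("Recovered lease, attempt=" + attempt + " on file=" + path);
        return true;
      }
      System.out.println("Failed to recover lease, attempt=" + attempt + " on file=" + path);
      Thread.sleep(4000L); // illustrative pause, roughly matching the ~4s gap in the log
    }
    return false;
  }
}
```

recoverLease only asks the NameNode to begin recovery; the boolean result is what separates the "Failed to recover lease, attempt=0" and "Recovered lease, attempt=1" lines above.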
2024-11-16T08:37:43,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32985 is added to blk_1073741839_1022 (size=85) 2024-11-16T08:37:43,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741839_1022 (size=85) 2024-11-16T08:37:43,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:43,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:44,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:44,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:44,654 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746255158 after 4002ms 2024-11-16T08:37:45,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:45,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:46,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:46,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:46,693 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746262653 after 4001ms 2024-11-16T08:37:46,693 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746262653 2024-11-16T08:37:46,697 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746262653 2024-11-16T08:37:46,698 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-16T08:37:46,698 ERROR [FSHLog-0-hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca-prefix:c27dd56784bd,35969,1731746239872.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36335,DS-9a89e58d-9857-4b31-be9b-41364b271d9b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:46,698 WARN [FSHLog-0-hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca-prefix:c27dd56784bd,35969,1731746239872.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36335,DS-9a89e58d-9857-4b31-be9b-41364b271d9b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:46,698 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c27dd56784bd%2C35969%2C1731746239872.meta:.meta(num 1731746241414) roll requested 2024-11-16T08:37:46,699 INFO [regionserver/c27dd56784bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C35969%2C1731746239872.meta.1731746266698.meta 2024-11-16T08:37:46,703 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:46,703 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:46,703 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:46,704 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:46,704 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:46,704 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.meta.1731746241414.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.meta.1731746266698.meta 2024-11-16T08:37:46,704 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36335,DS-9a89e58d-9857-4b31-be9b-41364b271d9b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:46,704 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36335,DS-9a89e58d-9857-4b31-be9b-41364b271d9b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:46,704 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.meta.1731746241414.meta 2024-11-16T08:37:46,705 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39591:39591),(127.0.0.1/127.0.0.1:44999:44999)] 2024-11-16T08:37:46,705 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.meta.1731746241414.meta is not closed yet, will try archiving it next time 2024-11-16T08:37:46,705 WARN [IPC Server handler 1 on default port 40709 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.meta.1731746241414.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1013 2024-11-16T08:37:46,705 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.meta.1731746241414.meta after 1ms 2024-11-16T08:37:46,720 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740/.tmp/info/56e82bf65db44030bc72e43a0d3c3a78 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7./info:regioninfo/1731746242212/Put/seqid=0 2024-11-16T08:37:46,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741841_1025 (size=7125) 2024-11-16T08:37:46,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32985 is added to blk_1073741841_1025 (size=7125) 2024-11-16T08:37:47,135 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740/.tmp/info/56e82bf65db44030bc72e43a0d3c3a78 2024-11-16T08:37:47,162 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740/.tmp/ns/e57df401452d4dadb89a0a8cbde3394d is 43, key is default/ns:d/1731746241603/Put/seqid=0 2024-11-16T08:37:47,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34255 is added to blk_1073741842_1026 (size=5153) 2024-11-16T08:37:47,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32985 is added to blk_1073741842_1026 (size=5153) 2024-11-16T08:37:47,176 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740/.tmp/ns/e57df401452d4dadb89a0a8cbde3394d 2024-11-16T08:37:47,200 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740/.tmp/table/d62087df57924f529d98253d2bd48f07 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731746242229/Put/seqid=0 2024-11-16T08:37:47,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741843_1027 (size=5438) 2024-11-16T08:37:47,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32985 is added to blk_1073741843_1027 (size=5438) 2024-11-16T08:37:47,216 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740/.tmp/table/d62087df57924f529d98253d2bd48f07 2024-11-16T08:37:47,224 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740/.tmp/info/56e82bf65db44030bc72e43a0d3c3a78 as hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740/info/56e82bf65db44030bc72e43a0d3c3a78 2024-11-16T08:37:47,234 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740/info/56e82bf65db44030bc72e43a0d3c3a78, entries=10, sequenceid=11, filesize=7.0 K 2024-11-16T08:37:47,236 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740/.tmp/ns/e57df401452d4dadb89a0a8cbde3394d as hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740/ns/e57df401452d4dadb89a0a8cbde3394d 2024-11-16T08:37:47,246 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740/ns/e57df401452d4dadb89a0a8cbde3394d, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T08:37:47,248 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740/.tmp/table/d62087df57924f529d98253d2bd48f07 as hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740/table/d62087df57924f529d98253d2bd48f07 2024-11-16T08:37:47,257 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740/table/d62087df57924f529d98253d2bd48f07, entries=2, sequenceid=11, filesize=5.3 K 2024-11-16T08:37:47,259 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 562ms, sequenceid=11, compaction requested=false 2024-11-16T08:37:47,259 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-16T08:37:47,260 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing c8f15dc6acba8171e6b5a3f4c7ba87c7 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-16T08:37:47,260 ERROR [FSHLog-0-hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca-prefix:c27dd56784bd,35969,1731746239872 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-921546855-172.17.0.3-1731746236475:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:47,260 WARN [FSHLog-0-hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca-prefix:c27dd56784bd,35969,1731746239872 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-921546855-172.17.0.3-1731746236475:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:47,261 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c27dd56784bd%2C35969%2C1731746239872:(num 1731746262653) roll requested 2024-11-16T08:37:47,261 INFO [regionserver/c27dd56784bd:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C35969%2C1731746239872.1731746267261 2024-11-16T08:37:47,274 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746262653 newFile=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746267261 2024-11-16T08:37:47,275 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:47,275 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:47,275 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:47,275 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:47,275 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:47,275 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746262653 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746267261 2024-11-16T08:37:47,275 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-921546855-172.17.0.3-1731746236475:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:47,276 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-921546855-172.17.0.3-1731746236475:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
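The block of warnings above is WAL lease recovery at work: after the datanode pipeline fails, the Close-WAL-Writer thread asks the NameNode to recover the lease on the old WAL and then polls until the file is sealed, which is why the log shows "Failed to recover lease, attempt=0 ... after 1ms" and, later, "Recovered lease, attempt=1 ... after 4001ms". The InvocationTargetException / "Filesystem closed" entries come from RecoverLeaseFSUtils probing isFileClosed reflectively against a DFSClient that has already been shut down. As a rough, hand-written sketch of that recover-and-poll idea using only the public DistributedFileSystem API (no reflection, and an invented backoff rather than HBase's actual retry policy):

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class SimpleLeaseRecovery {
  // Simplified stand-in for the RecoverLeaseFSUtils flow seen in the log:
  // ask the NameNode to recover the lease, then poll until the file is closed.
  public static void recover(DistributedFileSystem dfs, Path wal)
      throws IOException, InterruptedException {
    int attempt = 0;
    // recoverLease returns true once the file is already closed and the lease released.
    while (!dfs.recoverLease(wal)) {
      attempt++;
      // Cheaper probe between attempts; this is the call that surfaces
      // IOException("Filesystem closed") in the log when the client is gone.
      if (dfs.isFileClosed(wal)) {
        return;
      }
      Thread.sleep(1000L * Math.min(attempt, 4)); // crude backoff, not HBase's
    }
  }
}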
2024-11-16T08:37:47,276 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746262653 2024-11-16T08:37:47,277 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746262653 after 1ms 2024-11-16T08:37:47,284 DEBUG [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39591:39591),(127.0.0.1/127.0.0.1:44999:44999)] 2024-11-16T08:37:47,284 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.1731746262653 to hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/oldWALs/c27dd56784bd%2C35969%2C1731746239872.1731746262653 2024-11-16T08:37:47,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:47,301 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/c8f15dc6acba8171e6b5a3f4c7ba87c7/.tmp/info/8b886d6812ba4b12a6554b44b04efb96 is 1080, key is row1002/info:/1731746251802/Put/seqid=0 2024-11-16T08:37:47,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32985 is added to blk_1073741845_1029 (size=9270) 2024-11-16T08:37:47,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741845_1029 (size=9270) 2024-11-16T08:37:47,315 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/c8f15dc6acba8171e6b5a3f4c7ba87c7/.tmp/info/8b886d6812ba4b12a6554b44b04efb96 2024-11-16T08:37:47,324 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/c8f15dc6acba8171e6b5a3f4c7ba87c7/.tmp/info/8b886d6812ba4b12a6554b44b04efb96 as hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/c8f15dc6acba8171e6b5a3f4c7ba87c7/info/8b886d6812ba4b12a6554b44b04efb96 2024-11-16T08:37:47,334 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/c8f15dc6acba8171e6b5a3f4c7ba87c7/info/8b886d6812ba4b12a6554b44b04efb96, entries=4, sequenceid=8, filesize=9.1 K 2024-11-16T08:37:47,335 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for c8f15dc6acba8171e6b5a3f4c7ba87c7 in 76ms, sequenceid=8, compaction requested=false 2024-11-16T08:37:47,335 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for c8f15dc6acba8171e6b5a3f4c7ba87c7: 2024-11-16T08:37:47,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T08:37:47,343 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
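The flush recorded just above follows the two-step store layout seen throughout this section: the memstore is first written to a temporary HFile under .tmp/ ("Flushed memstore ... to=.../.tmp/info/..."), and only then moved into the column-family directory ("HRegionFileSystem(442): Committing ... as .../info/..."). The sketch below only illustrates that write-then-rename idea with the plain Hadoop FileSystem API; the paths and file contents are invented, and the real HRegionFileSystem commit does additional validation, so treat it as a simplified illustration rather than HBase's implementation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommit {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Invented paths standing in for .../data/default/<table>/<region>/...
    Path tmp = new Path("/data/default/demo/region1/.tmp/info/flushfile");
    Path dst = new Path("/data/default/demo/region1/info/flushfile");

    // 1) Write the flush output to the temporary location first
    //    (HBase writes a complete HFile here via its store file writer).
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeBytes("flushed-cells-placeholder");
    }
    // 2) Commit: a single rename moves the finished file into the store
    //    directory, so readers never observe a half-written file.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("commit failed for " + tmp);
    }
  }
}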
2024-11-16T08:37:47,343 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:37:47,343 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:37:47,343 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:37:47,343 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
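The "Connection has been closed" entries and the call stack above trace the teardown path: HBaseTestingUtil.shutdownMiniCluster closes the shared AsyncConnection (via Closeables.close) before shutting the cluster down. Outside the test harness the same close path is normally reached with try-with-resources; a minimal sketch of that, assuming the standard client API and using a placeholder table name that is not part of this test run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CloseAsyncConnection {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // createAsyncConnection returns a CompletableFuture; get() blocks until
    // the connection is established.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      // "demo" is a placeholder table name, not one from this log.
      conn.getTable(TableName.valueOf("demo"));
    } // close() here is what produces the AsyncConnectionImpl "Connection has been closed" line.
  }
}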
2024-11-16T08:37:47,343 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T08:37:47,343 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=349553566, stopped=false 2024-11-16T08:37:47,343 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c27dd56784bd,41917,1731746239663 2024-11-16T08:37:47,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:47,408 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T08:37:47,408 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:47,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T08:37:47,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:47,409 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T08:37:47,409 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T08:37:47,409 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at 
org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:37:47,409 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:37:47,409 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c27dd56784bd,35969,1731746239872' ***** 2024-11-16T08:37:47,409 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T08:37:47,409 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:37:47,410 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T08:37:47,410 INFO [RS:0;c27dd56784bd:35969 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T08:37:47,410 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T08:37:47,410 INFO [RS:0;c27dd56784bd:35969 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T08:37:47,410 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:37:47,410 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.HRegionServer(3091): Received CLOSE for c8f15dc6acba8171e6b5a3f4c7ba87c7 2024-11-16T08:37:47,410 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.HRegionServer(959): stopping server c27dd56784bd,35969,1731746239872 2024-11-16T08:37:47,410 INFO [RS:0;c27dd56784bd:35969 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T08:37:47,410 INFO [RS:0;c27dd56784bd:35969 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c27dd56784bd:35969. 2024-11-16T08:37:47,411 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c8f15dc6acba8171e6b5a3f4c7ba87c7, disabling compactions & flushes 2024-11-16T08:37:47,411 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7. 
2024-11-16T08:37:47,411 DEBUG [RS:0;c27dd56784bd:35969 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:37:47,411 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7. 2024-11-16T08:37:47,411 DEBUG [RS:0;c27dd56784bd:35969 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:37:47,411 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7. after waiting 0 ms 2024-11-16T08:37:47,411 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7. 2024-11-16T08:37:47,411 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T08:37:47,411 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T08:37:47,411 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-16T08:37:47,411 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T08:37:47,411 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T08:37:47,411 DEBUG [RS:0;c27dd56784bd:35969 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, c8f15dc6acba8171e6b5a3f4c7ba87c7=TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7.} 2024-11-16T08:37:47,412 DEBUG [RS:0;c27dd56784bd:35969 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c8f15dc6acba8171e6b5a3f4c7ba87c7 2024-11-16T08:37:47,412 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T08:37:47,412 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T08:37:47,412 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T08:37:47,412 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T08:37:47,412 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T08:37:47,420 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/default/TestLogRolling-testLogRollOnPipelineRestart/c8f15dc6acba8171e6b5a3f4c7ba87c7/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-16T08:37:47,420 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7. 2024-11-16T08:37:47,420 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c8f15dc6acba8171e6b5a3f4c7ba87c7: Waiting for close lock at 1731746267410Running coprocessor pre-close hooks at 1731746267410Disabling compacts and flushes for region at 1731746267410Disabling writes for close at 1731746267411 (+1 ms)Writing region close event to WAL at 1731746267412 (+1 ms)Running coprocessor post-close hooks at 1731746267420 (+8 ms)Closed at 1731746267420 2024-11-16T08:37:47,421 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731746241742.c8f15dc6acba8171e6b5a3f4c7ba87c7. 
2024-11-16T08:37:47,425 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T08:37:47,426 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T08:37:47,426 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T08:37:47,426 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731746267411Running coprocessor pre-close hooks at 1731746267411Disabling compacts and flushes for region at 1731746267411Disabling writes for close at 1731746267412 (+1 ms)Writing region close event to WAL at 1731746267418 (+6 ms)Running coprocessor post-close hooks at 1731746267426 (+8 ms)Closed at 1731746267426 2024-11-16T08:37:47,426 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T08:37:47,612 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.HRegionServer(976): stopping server c27dd56784bd,35969,1731746239872; all regions closed. 2024-11-16T08:37:47,612 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:47,612 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:47,612 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:47,612 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:47,613 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:47,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32985 is added to blk_1073741840_1023 (size=825) 2024-11-16T08:37:47,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741840_1023 (size=825) 2024-11-16T08:37:47,925 INFO [regionserver/c27dd56784bd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-16T08:37:47,925 INFO [regionserver/c27dd56784bd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-16T08:37:48,113 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-16T08:37:48,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:48,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:48,885 INFO [regionserver/c27dd56784bd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T08:37:49,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:49,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:49,419 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T08:37:49,419 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T08:37:49,419 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-16T08:37:49,637 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T08:37:50,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:50,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:50,706 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.meta.1731746241414.meta after 4002ms 2024-11-16T08:37:50,707 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/WALs/c27dd56784bd,35969,1731746239872/c27dd56784bd%2C35969%2C1731746239872.meta.1731746241414.meta to hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/oldWALs/c27dd56784bd%2C35969%2C1731746239872.meta.1731746241414.meta 2024-11-16T08:37:50,711 DEBUG [RS:0;c27dd56784bd:35969 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/oldWALs 2024-11-16T08:37:50,711 INFO [RS:0;c27dd56784bd:35969 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c27dd56784bd%2C35969%2C1731746239872.meta:.meta(num 1731746266698) 2024-11-16T08:37:50,713 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:50,713 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:50,713 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:50,713 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:50,713 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:50,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32985 is added to blk_1073741844_1028 (size=1162) 2024-11-16T08:37:50,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741844_1028 (size=1162) 2024-11-16T08:37:51,124 DEBUG [RS:0;c27dd56784bd:35969 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/oldWALs 2024-11-16T08:37:51,124 INFO [RS:0;c27dd56784bd:35969 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c27dd56784bd%2C35969%2C1731746239872:(num 1731746267261) 2024-11-16T08:37:51,124 DEBUG [RS:0;c27dd56784bd:35969 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:37:51,124 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T08:37:51,124 INFO [RS:0;c27dd56784bd:35969 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T08:37:51,124 INFO [RS:0;c27dd56784bd:35969 {}] hbase.ChoreService(370): Chore service for: regionserver/c27dd56784bd:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T08:37:51,125 INFO [RS:0;c27dd56784bd:35969 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T08:37:51,125 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
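The repeated "Failed invocation ... Filesystem closed" warnings and the "Recovered lease, attempt=1 ... after 4002ms" entry above come from WAL lease recovery: the close path asks the NameNode to recover the lease on the old writer's file, then polls until HDFS reports it closed. The following is a simplified sketch of that loop against a DistributedFileSystem; the real RecoverLeaseFSUtils adds timeouts, pauses and a reflection-based isFileClosed probe, which is why the traces above show InvocationTargetException wrapping the "Filesystem closed" IOException.

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    final class LeaseRecoverySketch {
      // Simplified stand-in for the lease-recovery loop, not the real implementation.
      static void recoverLease(DistributedFileSystem dfs, Path wal)
          throws IOException, InterruptedException {
        boolean recovered = dfs.recoverLease(wal);   // ask the NameNode to start lease recovery
        while (!recovered) {
          Thread.sleep(1000L);                       // pause between attempts
          // isFileClosed only works while the DFS client is open; once the test has shut the
          // filesystem down it throws "Filesystem closed", as the warnings above show.
          recovered = dfs.isFileClosed(wal) || dfs.recoverLease(wal);
        }
      }
    }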
2024-11-16T08:37:51,125 INFO [RS:0;c27dd56784bd:35969 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:35969 2024-11-16T08:37:51,190 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c27dd56784bd,35969,1731746239872 2024-11-16T08:37:51,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T08:37:51,190 INFO [RS:0;c27dd56784bd:35969 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T08:37:51,201 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c27dd56784bd,35969,1731746239872] 2024-11-16T08:37:51,211 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c27dd56784bd,35969,1731746239872 already deleted, retry=false 2024-11-16T08:37:51,211 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c27dd56784bd,35969,1731746239872 expired; onlineServers=0 2024-11-16T08:37:51,211 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c27dd56784bd,41917,1731746239663' ***** 2024-11-16T08:37:51,211 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T08:37:51,212 INFO [M:0;c27dd56784bd:41917 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T08:37:51,212 INFO [M:0;c27dd56784bd:41917 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T08:37:51,212 DEBUG [M:0;c27dd56784bd:41917 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T08:37:51,212 DEBUG [M:0;c27dd56784bd:41917 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T08:37:51,212 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
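The NodeDeleted event for /hbase/rs/c27dd56784bd,35969,1731746239872 above is the master noticing that the region server's ephemeral znode disappeared when its ZooKeeper session ended, which is what "processing expiration" refers to. A rough sketch of that ZooKeeper pattern (ephemeral registration plus a deletion watch) follows; the quorum string and znode path are copied from this log, everything else is illustrative.

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.Watcher.Event.EventType;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    final class RsTrackerSketch {
      static void watchRegionServer() throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:61509", 30000, event -> {
          // The master-side tracker reacts to exactly this kind of event.
          if (event.getType() == EventType.NodeDeleted
              && event.getPath() != null && event.getPath().startsWith("/hbase/rs/")) {
            System.out.println("RegionServer ephemeral node deleted: " + event.getPath());
          }
        });
        // A region server registers itself as an ephemeral znode; it vanishes when the session ends.
        zk.create("/hbase/rs/c27dd56784bd,35969,1731746239872", new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        // Arm a watch on the znode so its deletion is delivered to the default watcher above.
        zk.exists("/hbase/rs/c27dd56784bd,35969,1731746239872", true);
      }
    }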
2024-11-16T08:37:51,212 DEBUG [master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746240748 {}] cleaner.HFileCleaner(306): Exit Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746240748,5,FailOnTimeoutGroup] 2024-11-16T08:37:51,212 DEBUG [master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746240749 {}] cleaner.HFileCleaner(306): Exit Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746240749,5,FailOnTimeoutGroup] 2024-11-16T08:37:51,212 INFO [M:0;c27dd56784bd:41917 {}] hbase.ChoreService(370): Chore service for: master/c27dd56784bd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T08:37:51,213 INFO [M:0;c27dd56784bd:41917 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T08:37:51,213 DEBUG [M:0;c27dd56784bd:41917 {}] master.HMaster(1795): Stopping service threads 2024-11-16T08:37:51,213 INFO [M:0;c27dd56784bd:41917 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T08:37:51,213 INFO [M:0;c27dd56784bd:41917 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T08:37:51,213 INFO [M:0;c27dd56784bd:41917 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T08:37:51,213 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T08:37:51,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T08:37:51,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:51,222 DEBUG [M:0;c27dd56784bd:41917 {}] zookeeper.ZKUtil(347): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T08:37:51,222 WARN [M:0;c27dd56784bd:41917 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T08:37:51,223 INFO [M:0;c27dd56784bd:41917 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/.lastflushedseqids 2024-11-16T08:37:51,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741846_1030 (size=111) 2024-11-16T08:37:51,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32985 is added to blk_1073741846_1030 (size=111) 2024-11-16T08:37:51,234 INFO [M:0;c27dd56784bd:41917 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T08:37:51,234 INFO [M:0;c27dd56784bd:41917 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T08:37:51,234 DEBUG [M:0;c27dd56784bd:41917 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T08:37:51,234 INFO [M:0;c27dd56784bd:41917 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:37:51,234 DEBUG [M:0;c27dd56784bd:41917 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:37:51,234 DEBUG [M:0;c27dd56784bd:41917 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T08:37:51,234 DEBUG [M:0;c27dd56784bd:41917 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:37:51,234 INFO [M:0;c27dd56784bd:41917 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-11-16T08:37:51,235 ERROR [FSHLog-0-hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData-prefix:c27dd56784bd,41917,1731746239663 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36335,DS-9a89e58d-9857-4b31-be9b-41364b271d9b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:51,235 WARN [FSHLog-0-hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData-prefix:c27dd56784bd,41917,1731746239663 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36335,DS-9a89e58d-9857-4b31-be9b-41364b271d9b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-16T08:37:51,235 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog c27dd56784bd%2C41917%2C1731746239663:(num 1731746240455) roll requested 2024-11-16T08:37:51,235 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C41917%2C1731746239663.1731746271235 2024-11-16T08:37:51,240 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:51,240 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:51,240 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:51,240 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:51,240 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:51,240 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/WALs/c27dd56784bd,41917,1731746239663/c27dd56784bd%2C41917%2C1731746239663.1731746240455 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/WALs/c27dd56784bd,41917,1731746239663/c27dd56784bd%2C41917%2C1731746239663.1731746271235 2024-11-16T08:37:51,241 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36335,DS-9a89e58d-9857-4b31-be9b-41364b271d9b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-16T08:37:51,241 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36335,DS-9a89e58d-9857-4b31-be9b-41364b271d9b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
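The roll above ("roll requested" followed by "Rolled WAL ... with entries=53") is triggered internally by the WAL roller after the append failure. For reference, a roll of a region server's WAL can also be requested from client code through the Admin API; a hedged sketch, reusing the region server name printed throughout this log, is below.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    final class RollWalSketch {
      static void rollWal() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // ServerName format is host,port,startcode, as printed throughout this log.
          admin.rollWALWriter(ServerName.valueOf("c27dd56784bd,35969,1731746239872"));
        }
      }
    }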
2024-11-16T08:37:51,241 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/WALs/c27dd56784bd,41917,1731746239663/c27dd56784bd%2C41917%2C1731746239663.1731746240455 2024-11-16T08:37:51,241 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44999:44999),(127.0.0.1/127.0.0.1:39591:39591)] 2024-11-16T08:37:51,241 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/WALs/c27dd56784bd,41917,1731746239663/c27dd56784bd%2C41917%2C1731746239663.1731746240455 is not closed yet, will try archiving it next time 2024-11-16T08:37:51,241 WARN [IPC Server handler 2 on default port 40709 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/WALs/c27dd56784bd,41917,1731746239663/c27dd56784bd%2C41917%2C1731746239663.1731746240455 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-16T08:37:51,242 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/WALs/c27dd56784bd,41917,1731746239663/c27dd56784bd%2C41917%2C1731746239663.1731746240455 after 1ms 2024-11-16T08:37:51,259 DEBUG [M:0;c27dd56784bd:41917 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/aaddef43586c457c9642c97ed20f31d0 is 82, key is hbase:meta,,1/info:regioninfo/1731746241518/Put/seqid=0 2024-11-16T08:37:51,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741848_1033 (size=5672) 2024-11-16T08:37:51,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32985 is added to blk_1073741848_1033 (size=5672) 2024-11-16T08:37:51,265 INFO [M:0;c27dd56784bd:41917 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/aaddef43586c457c9642c97ed20f31d0 2024-11-16T08:37:51,286 DEBUG [M:0;c27dd56784bd:41917 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cb90118574144063b432de8bffdcedaa is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731746242240/Put/seqid=0 2024-11-16T08:37:51,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:51,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32985 is added to blk_1073741849_1034 (size=6118) 2024-11-16T08:37:51,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741849_1034 (size=6118) 2024-11-16T08:37:51,297 INFO [M:0;c27dd56784bd:41917 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cb90118574144063b432de8bffdcedaa 2024-11-16T08:37:51,301 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:37:51,301 INFO [RS:0;c27dd56784bd:35969 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T08:37:51,301 INFO [RS:0;c27dd56784bd:35969 {}] regionserver.HRegionServer(1031): Exiting; stopping=c27dd56784bd,35969,1731746239872; zookeeper connection closed. 
2024-11-16T08:37:51,301 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x10142ca90440001, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:37:51,301 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@53ad5611 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@53ad5611 2024-11-16T08:37:51,301 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T08:37:51,318 DEBUG [M:0;c27dd56784bd:41917 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a7468528ecf94f4094df3f7e1a5c2094 is 69, key is c27dd56784bd,35969,1731746239872/rs:state/1731746240851/Put/seqid=0 2024-11-16T08:37:51,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741850_1035 (size=5156) 2024-11-16T08:37:51,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32985 is added to blk_1073741850_1035 (size=5156) 2024-11-16T08:37:51,323 INFO [M:0;c27dd56784bd:41917 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a7468528ecf94f4094df3f7e1a5c2094 2024-11-16T08:37:51,344 DEBUG [M:0;c27dd56784bd:41917 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/60cf51536cf34641ab33b5eb6d068c8b is 52, key is load_balancer_on/state:d/1731746241736/Put/seqid=0 2024-11-16T08:37:51,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32985 is added to blk_1073741851_1036 (size=5056) 2024-11-16T08:37:51,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741851_1036 (size=5056) 2024-11-16T08:37:51,350 INFO [M:0;c27dd56784bd:41917 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/60cf51536cf34641ab33b5eb6d068c8b 2024-11-16T08:37:51,357 DEBUG [M:0;c27dd56784bd:41917 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/aaddef43586c457c9642c97ed20f31d0 as hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/aaddef43586c457c9642c97ed20f31d0 2024-11-16T08:37:51,363 INFO [M:0;c27dd56784bd:41917 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/aaddef43586c457c9642c97ed20f31d0, entries=8, sequenceid=56, 
filesize=5.5 K 2024-11-16T08:37:51,364 DEBUG [M:0;c27dd56784bd:41917 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cb90118574144063b432de8bffdcedaa as hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cb90118574144063b432de8bffdcedaa 2024-11-16T08:37:51,370 INFO [M:0;c27dd56784bd:41917 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cb90118574144063b432de8bffdcedaa, entries=6, sequenceid=56, filesize=6.0 K 2024-11-16T08:37:51,372 DEBUG [M:0;c27dd56784bd:41917 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a7468528ecf94f4094df3f7e1a5c2094 as hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a7468528ecf94f4094df3f7e1a5c2094 2024-11-16T08:37:51,377 INFO [M:0;c27dd56784bd:41917 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a7468528ecf94f4094df3f7e1a5c2094, entries=1, sequenceid=56, filesize=5.0 K 2024-11-16T08:37:51,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:51,378 DEBUG [M:0;c27dd56784bd:41917 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/60cf51536cf34641ab33b5eb6d068c8b as hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/60cf51536cf34641ab33b5eb6d068c8b 2024-11-16T08:37:51,383 INFO [M:0;c27dd56784bd:41917 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/60cf51536cf34641ab33b5eb6d068c8b, entries=1, sequenceid=56, filesize=4.9 K 2024-11-16T08:37:51,384 INFO [M:0;c27dd56784bd:41917 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 150ms, sequenceid=56, compaction requested=false 2024-11-16T08:37:51,385 INFO [M:0;c27dd56784bd:41917 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:37:51,386 DEBUG [M:0;c27dd56784bd:41917 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731746271234Disabling compacts and flushes for region at 1731746271234Disabling writes for close at 1731746271234Obtaining lock to block concurrent updates at 1731746271234Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731746271234Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731746271235 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731746271242 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731746271242Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731746271258 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731746271258Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731746271271 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731746271286 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731746271286Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731746271302 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731746271317 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731746271317Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731746271328 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731746271344 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731746271344Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2de06cd5: reopening flushed file at 1731746271356 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@54d67bc2: reopening flushed file at 1731746271363 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f7e4603: reopening flushed file at 1731746271370 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1948a606: reopening flushed file at 1731746271377 (+7 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 150ms, sequenceid=56, compaction requested=false at 1731746271384 (+7 ms)Writing region close event to WAL at 1731746271385 (+1 ms)Closed at 1731746271385 2024-11-16T08:37:51,386 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:51,386 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:51,386 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:51,386 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:51,386 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:37:51,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34255 is added to blk_1073741847_1031 (size=757) 2024-11-16T08:37:51,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32985 is added to blk_1073741847_1031 (size=757) 2024-11-16T08:37:52,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:52,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:52,421 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,421 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,448 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,448 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,448 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,450 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,957 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T08:37:52,958 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,958 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,958 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,958 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,979 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,985 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,985 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:52,989 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:37:53,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:53,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:54,115 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-16T08:37:54,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:54,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:37:55,242 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/WALs/c27dd56784bd,41917,1731746239663/c27dd56784bd%2C41917%2C1731746239663.1731746240455 after 4001ms 2024-11-16T08:37:55,243 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/WALs/c27dd56784bd,41917,1731746239663/c27dd56784bd%2C41917%2C1731746239663.1731746240455 to hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/oldWALs/c27dd56784bd%2C41917%2C1731746239663.1731746240455 2024-11-16T08:37:55,246 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/MasterData/oldWALs/c27dd56784bd%2C41917%2C1731746239663.1731746240455 to hdfs://localhost:40709/user/jenkins/test-data/7c5af985-897a-db44-c676-087e135f36ca/oldWALs/c27dd56784bd%2C41917%2C1731746239663.1731746240455$masterlocalwal$ 2024-11-16T08:37:55,246 INFO [M:0;c27dd56784bd:41917 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T08:37:55,246 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T08:37:55,247 INFO [M:0;c27dd56784bd:41917 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41917 2024-11-16T08:37:55,247 INFO [M:0;c27dd56784bd:41917 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T08:37:55,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:55,364 INFO [M:0;c27dd56784bd:41917 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T08:37:55,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:37:55,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41917-0x10142ca90440000, quorum=127.0.0.1:61509, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:37:55,367 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39ebff59{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:37:55,368 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@160f7ba9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:37:55,368 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:37:55,368 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16fecb8d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:37:55,368 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21ffcd24{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/hadoop.log.dir/,STOPPED} 2024-11-16T08:37:55,370 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
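The repeated `RecoverLeaseFSUtils(258): Failed invocation` warnings above all wrap the same failure: a reflective call to `DistributedFileSystem.isFileClosed` on a WAL path throws `java.io.IOException: Filesystem closed` because the shared `DFSClient` behind the cached `FileSystem` instance has already been shut down during teardown, and that cause surfaces inside an `InvocationTargetException`. The following is a minimal, hypothetical sketch of that call pattern only, not the actual HBase `RecoverLeaseFSUtils` code; it assumes the Hadoop HDFS client libraries are on the classpath and uses a placeholder NameNode URI and path.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical sketch: reflectively probe DistributedFileSystem.isFileClosed the way
// lease-recovery helpers do, and unwrap the InvocationTargetException seen in the log.
public class IsFileClosedProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder URI/path; in the log these point at the test minicluster's WAL files.
    FileSystem fs = FileSystem.get(java.net.URI.create("hdfs://localhost:8020"), conf);
    Path wal = new Path("/example/wal-file");

    Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    try {
      boolean closed = (boolean) isFileClosed.invoke(fs, wal);
      System.out.println("isFileClosed=" + closed);
    } catch (InvocationTargetException e) {
      // If the DFSClient behind `fs` was already closed, the cause is
      // java.io.IOException: Filesystem closed -- exactly what the WARN lines above show.
      System.err.println("isFileClosed probe failed: " + e.getCause());
    } finally {
      fs.close();
    }
  }
}
```

Because `FileSystem.get` returns a cached, shared instance per URI and UGI, closing it anywhere in the JVM closes the underlying client for every holder, which is why this shows up as a shutdown-ordering race in teardown logs rather than as a data-path error.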
2024-11-16T08:37:55,370 WARN [BP-921546855-172.17.0.3-1731746236475 heartbeating to localhost/127.0.0.1:40709 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T08:37:55,370 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T08:37:55,370 WARN [BP-921546855-172.17.0.3-1731746236475 heartbeating to localhost/127.0.0.1:40709 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-921546855-172.17.0.3-1731746236475 (Datanode Uuid 6ca2f07c-8fe0-461e-81df-db1309519ed2) service to localhost/127.0.0.1:40709 2024-11-16T08:37:55,371 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/cluster_5a80eb13-0e85-3694-9134-074b45462566/data/data3/current/BP-921546855-172.17.0.3-1731746236475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:37:55,371 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/cluster_5a80eb13-0e85-3694-9134-074b45462566/data/data4/current/BP-921546855-172.17.0.3-1731746236475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:37:55,372 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T08:37:55,374 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@e9f8962{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:37:55,374 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a79eeb5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:37:55,374 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:37:55,374 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c9115f6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:37:55,374 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a0cdfff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/hadoop.log.dir/,STOPPED} 2024-11-16T08:37:55,377 WARN [BP-921546855-172.17.0.3-1731746236475 heartbeating to localhost/127.0.0.1:40709 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T08:37:55,377 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
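The long run of `FsDatasetImpl(779): Exception thrown while metric collection` warnings earlier in this stretch reports the same shutdown race each time: metric collection dereferences an executor map that has already been nulled out, so `Map.values()` throws the NullPointerException quoted in the message. The snippet below is an illustration of that failure mode and of a null-safe variant; it is generic Java written for this note, not the Hadoop `FsDatasetImpl` source.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;

// Illustrative only: reproduces the "Cannot invoke Map.values() because executors is null"
// pattern from the WARN lines above, then shows a null-safe alternative.
public class MetricCollectionRace {
  private volatile Map<String, ExecutorService> executors = new ConcurrentHashMap<>();

  // Shutdown path: the map reference is dropped while metric collection may still run.
  void shutdown() {
    executors = null;
  }

  // Unsafe: throws NullPointerException ("Cannot invoke ... values() ...") after shutdown().
  int countUnsafe() {
    return executors.values().size();
  }

  // Safe: snapshot the reference once and tolerate the shutdown race.
  int countSafe() {
    Map<String, ExecutorService> snapshot = executors;
    return snapshot == null ? 0 : snapshot.values().size();
  }

  public static void main(String[] args) {
    MetricCollectionRace m = new MetricCollectionRace();
    m.shutdown();
    System.out.println("safe count after shutdown: " + m.countSafe());
    try {
      m.countUnsafe();
    } catch (NullPointerException e) {
      System.err.println("unsafe count failed: " + e.getMessage());
    }
  }
}
```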
2024-11-16T08:37:55,377 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T08:37:55,377 WARN [BP-921546855-172.17.0.3-1731746236475 heartbeating to localhost/127.0.0.1:40709 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-921546855-172.17.0.3-1731746236475 (Datanode Uuid 882be36c-b551-492c-9d5c-f23c55439f3e) service to localhost/127.0.0.1:40709 2024-11-16T08:37:55,378 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/cluster_5a80eb13-0e85-3694-9134-074b45462566/data/data1/current/BP-921546855-172.17.0.3-1731746236475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:37:55,378 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/cluster_5a80eb13-0e85-3694-9134-074b45462566/data/data2/current/BP-921546855-172.17.0.3-1731746236475 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:37:55,378 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T08:37:55,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:55,384 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@708201bd{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T08:37:55,385 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11255fea{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:37:55,385 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:37:55,385 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c2762d6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:37:55,385 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@53effb5a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/hadoop.log.dir/,STOPPED} 2024-11-16T08:37:55,394 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T08:37:55,416 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T08:37:55,424 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=181 (was 156) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40709 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:40709 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:40709 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40709 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:40709 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40709 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:40709 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40709 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 448) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=441 (was 409) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2106 (was 3521) 2024-11-16T08:37:55,433 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=181, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=441, ProcessCount=11, AvailableMemoryMB=2105 2024-11-16T08:37:55,433 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T08:37:55,433 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/hadoop.log.dir so I do NOT create it in target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb 2024-11-16T08:37:55,433 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e4294179-bc89-904f-f726-7444b54eab0f/hadoop.tmp.dir so I do NOT create it in target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb 2024-11-16T08:37:55,433 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/cluster_7e9b31b8-5397-c3ee-d6d9-3d012629e910, deleteOnExit=true 2024-11-16T08:37:55,433 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): 
STARTING DFS 2024-11-16T08:37:55,434 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/test.cache.data in system properties and HBase conf 2024-11-16T08:37:55,434 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T08:37:55,434 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/hadoop.log.dir in system properties and HBase conf 2024-11-16T08:37:55,434 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T08:37:55,434 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T08:37:55,434 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T08:37:55,434 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-16T08:37:55,434 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T08:37:55,434 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T08:37:55,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T08:37:55,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T08:37:55,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T08:37:55,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T08:37:55,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T08:37:55,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T08:37:55,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T08:37:55,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/nfs.dump.dir in system properties and HBase conf 2024-11-16T08:37:55,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/java.io.tmpdir in system properties and HBase conf 2024-11-16T08:37:55,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T08:37:55,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T08:37:55,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T08:37:55,449 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T08:37:55,795 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:37:55,801 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:37:55,809 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:37:55,809 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:37:55,809 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T08:37:55,810 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:37:55,810 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20f59884{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:37:55,810 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@190ad9e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:37:55,918 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43498b11{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/java.io.tmpdir/jetty-localhost-40527-hadoop-hdfs-3_4_1-tests_jar-_-any-2044728011302806281/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T08:37:55,918 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1367dc96{HTTP/1.1, (http/1.1)}{localhost:40527} 2024-11-16T08:37:55,918 INFO [Time-limited test {}] server.Server(415): Started @194154ms 2024-11-16T08:37:55,930 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T08:37:56,175 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:37:56,178 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:37:56,179 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:37:56,179 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:37:56,179 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T08:37:56,180 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4faae08f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:37:56,180 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c13156e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:37:56,296 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@40527499{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/java.io.tmpdir/jetty-localhost-42467-hadoop-hdfs-3_4_1-tests_jar-_-any-16801056645559504399/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:37:56,297 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@c1ed8d4{HTTP/1.1, (http/1.1)}{localhost:42467} 2024-11-16T08:37:56,297 INFO [Time-limited test {}] server.Server(415): Started @194533ms 2024-11-16T08:37:56,298 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:37:56,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:56,332 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:37:56,336 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:37:56,337 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:37:56,337 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:37:56,337 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T08:37:56,337 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@34534d9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:37:56,338 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@14a6d451{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:37:56,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:56,454 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c734161{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/java.io.tmpdir/jetty-localhost-39877-hadoop-hdfs-3_4_1-tests_jar-_-any-8464149174648814855/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:37:56,454 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75e789f0{HTTP/1.1, (http/1.1)}{localhost:39877} 2024-11-16T08:37:56,454 INFO [Time-limited test {}] server.Server(415): Started @194690ms 2024-11-16T08:37:56,455 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:37:57,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:57,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:57,643 WARN [Thread-1637 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/cluster_7e9b31b8-5397-c3ee-d6d9-3d012629e910/data/data1/current/BP-1093759444-172.17.0.3-1731746275460/current, will proceed with Du for space computation calculation, 2024-11-16T08:37:57,643 WARN [Thread-1638 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/cluster_7e9b31b8-5397-c3ee-d6d9-3d012629e910/data/data2/current/BP-1093759444-172.17.0.3-1731746275460/current, will proceed with Du for space computation calculation, 2024-11-16T08:37:57,689 WARN [Thread-1601 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T08:37:57,695 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x66384ed78fec54a7 with lease ID 0xd52edee6a3ac04e5: Processing first storage report for DS-af99ae63-8e3a-4a75-b812-871f994e9e73 from datanode DatanodeRegistration(127.0.0.1:45569, datanodeUuid=5e969dde-d51a-4ea6-a6f7-633a38482fcd, infoPort=44917, infoSecurePort=0, ipcPort=39527, storageInfo=lv=-57;cid=testClusterID;nsid=2097342781;c=1731746275460) 2024-11-16T08:37:57,695 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x66384ed78fec54a7 with lease ID 0xd52edee6a3ac04e5: from storage DS-af99ae63-8e3a-4a75-b812-871f994e9e73 node DatanodeRegistration(127.0.0.1:45569, datanodeUuid=5e969dde-d51a-4ea6-a6f7-633a38482fcd, infoPort=44917, infoSecurePort=0, ipcPort=39527, storageInfo=lv=-57;cid=testClusterID;nsid=2097342781;c=1731746275460), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:37:57,695 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x66384ed78fec54a7 with lease ID 0xd52edee6a3ac04e5: Processing first storage report for DS-6a462572-8bd6-4f22-8835-8020bea87bc8 from datanode DatanodeRegistration(127.0.0.1:45569, datanodeUuid=5e969dde-d51a-4ea6-a6f7-633a38482fcd, infoPort=44917, infoSecurePort=0, ipcPort=39527, storageInfo=lv=-57;cid=testClusterID;nsid=2097342781;c=1731746275460) 2024-11-16T08:37:57,695 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x66384ed78fec54a7 with lease ID 0xd52edee6a3ac04e5: from storage DS-6a462572-8bd6-4f22-8835-8020bea87bc8 node DatanodeRegistration(127.0.0.1:45569, datanodeUuid=5e969dde-d51a-4ea6-a6f7-633a38482fcd, infoPort=44917, infoSecurePort=0, ipcPort=39527, storageInfo=lv=-57;cid=testClusterID;nsid=2097342781;c=1731746275460), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:37:57,867 WARN [Thread-1648 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/cluster_7e9b31b8-5397-c3ee-d6d9-3d012629e910/data/data3/current/BP-1093759444-172.17.0.3-1731746275460/current, will proceed with Du for space computation calculation, 2024-11-16T08:37:57,867 WARN [Thread-1649 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/cluster_7e9b31b8-5397-c3ee-d6d9-3d012629e910/data/data4/current/BP-1093759444-172.17.0.3-1731746275460/current, will proceed with Du for space computation calculation, 2024-11-16T08:37:57,889 WARN [Thread-1624 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T08:37:57,891 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc1fbad6952091bd1 with lease ID 0xd52edee6a3ac04e6: Processing first storage report for DS-06f9c115-ea8f-4ae3-9027-8d48367a0958 from datanode DatanodeRegistration(127.0.0.1:45583, datanodeUuid=2d71fedb-c708-435b-906a-527cc165243a, infoPort=42707, infoSecurePort=0, ipcPort=41507, storageInfo=lv=-57;cid=testClusterID;nsid=2097342781;c=1731746275460) 2024-11-16T08:37:57,891 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc1fbad6952091bd1 with lease ID 0xd52edee6a3ac04e6: from storage DS-06f9c115-ea8f-4ae3-9027-8d48367a0958 node DatanodeRegistration(127.0.0.1:45583, datanodeUuid=2d71fedb-c708-435b-906a-527cc165243a, infoPort=42707, infoSecurePort=0, ipcPort=41507, storageInfo=lv=-57;cid=testClusterID;nsid=2097342781;c=1731746275460), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:37:57,891 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc1fbad6952091bd1 with lease ID 0xd52edee6a3ac04e6: Processing first storage report for DS-e243c322-0e50-4453-a0d2-d494462f717e from datanode DatanodeRegistration(127.0.0.1:45583, datanodeUuid=2d71fedb-c708-435b-906a-527cc165243a, infoPort=42707, infoSecurePort=0, ipcPort=41507, storageInfo=lv=-57;cid=testClusterID;nsid=2097342781;c=1731746275460) 2024-11-16T08:37:57,891 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc1fbad6952091bd1 with lease ID 0xd52edee6a3ac04e6: from storage DS-e243c322-0e50-4453-a0d2-d494462f717e node DatanodeRegistration(127.0.0.1:45583, datanodeUuid=2d71fedb-c708-435b-906a-527cc165243a, infoPort=42707, infoSecurePort=0, ipcPort=41507, storageInfo=lv=-57;cid=testClusterID;nsid=2097342781;c=1731746275460), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:37:57,918 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb 2024-11-16T08:37:57,920 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/cluster_7e9b31b8-5397-c3ee-d6d9-3d012629e910/zookeeper_0, clientPort=56514, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/cluster_7e9b31b8-5397-c3ee-d6d9-3d012629e910/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/cluster_7e9b31b8-5397-c3ee-d6d9-3d012629e910/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T08:37:57,921 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56514 2024-11-16T08:37:57,922 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:37:57,923 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:37:57,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741825_1001 (size=7) 2024-11-16T08:37:57,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741825_1001 (size=7) 2024-11-16T08:37:57,935 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988 with version=8 2024-11-16T08:37:57,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/hbase-staging 2024-11-16T08:37:57,937 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c27dd56784bd:0 server-side Connection retries=45 2024-11-16T08:37:57,937 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:37:57,937 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T08:37:57,937 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T08:37:57,937 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:37:57,937 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T08:37:57,937 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T08:37:57,937 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T08:37:57,938 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37905 2024-11-16T08:37:57,939 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37905 connecting to ZooKeeper ensemble=127.0.0.1:56514 2024-11-16T08:37:57,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:379050x0, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T08:37:57,996 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37905-0x10142cb25cb0000 connected 2024-11-16T08:37:58,085 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:37:58,087 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:37:58,090 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:37:58,090 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988, hbase.cluster.distributed=false 2024-11-16T08:37:58,093 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T08:37:58,094 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37905 2024-11-16T08:37:58,094 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37905 2024-11-16T08:37:58,095 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37905 2024-11-16T08:37:58,095 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37905 2024-11-16T08:37:58,095 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37905 2024-11-16T08:37:58,118 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c27dd56784bd:0 server-side Connection retries=45 2024-11-16T08:37:58,119 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:37:58,119 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T08:37:58,119 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T08:37:58,119 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:37:58,119 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T08:37:58,119 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T08:37:58,119 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T08:37:58,120 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44231 2024-11-16T08:37:58,122 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44231 connecting to ZooKeeper ensemble=127.0.0.1:56514 2024-11-16T08:37:58,123 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:37:58,126 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:37:58,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:442310x0, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T08:37:58,138 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:442310x0, quorum=127.0.0.1:56514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:37:58,138 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44231-0x10142cb25cb0001 connected 2024-11-16T08:37:58,138 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T08:37:58,139 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T08:37:58,140 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44231-0x10142cb25cb0001, quorum=127.0.0.1:56514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T08:37:58,141 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44231-0x10142cb25cb0001, quorum=127.0.0.1:56514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T08:37:58,141 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44231 2024-11-16T08:37:58,141 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44231 2024-11-16T08:37:58,142 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44231 2024-11-16T08:37:58,142 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44231 2024-11-16T08:37:58,142 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44231 2024-11-16T08:37:58,154 
DEBUG [M:0;c27dd56784bd:37905 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c27dd56784bd:37905 2024-11-16T08:37:58,155 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c27dd56784bd,37905,1731746277937 2024-11-16T08:37:58,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:37:58,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44231-0x10142cb25cb0001, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:37:58,166 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c27dd56784bd,37905,1731746277937 2024-11-16T08:37:58,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:58,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44231-0x10142cb25cb0001, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T08:37:58,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44231-0x10142cb25cb0001, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:58,180 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T08:37:58,181 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c27dd56784bd,37905,1731746277937 from backup master directory 2024-11-16T08:37:58,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c27dd56784bd,37905,1731746277937 2024-11-16T08:37:58,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44231-0x10142cb25cb0001, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:37:58,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:37:58,190 WARN [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
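The trace from 08:37:55 onward shows HBaseTestingUtil bringing up a single-master, single-regionserver mini cluster (two datanodes, one ZooKeeper server on client port 56514) for TestLogRolling#testCompactionRecordDoesntBlockRolling, and the entries just above show that master binding its RPC port and registering in ZooKeeper. For orientation, a minimal sketch of how such a cluster is typically started and torn down in a test, assuming the HBase 3 test API (HBaseTestingUtil / StartMiniClusterOption) and builder methods matching the option fields logged above:

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // Test utility that owns the temporary dirs (hadoop.log.dir, hbase.rootdir, ...)
    // seen in the "Setting ... in system properties and HBase conf" entries above.
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();

    util.startMiniCluster(option);   // brings up DFS, ZooKeeper, the HMaster and the region server
    try {
      // ... exercise WAL rolling / compaction here, as the test above does ...
    } finally {
      util.shutdownMiniCluster();    // releases the threads and file descriptors tracked by ResourceChecker
    }
  }
}
```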
2024-11-16T08:37:58,190 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c27dd56784bd,37905,1731746277937 2024-11-16T08:37:58,196 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/hbase.id] with ID: b1328763-76b1-412a-bda2-b918c9fb8830 2024-11-16T08:37:58,196 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/.tmp/hbase.id 2024-11-16T08:37:58,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741826_1002 (size=42) 2024-11-16T08:37:58,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741826_1002 (size=42) 2024-11-16T08:37:58,203 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/.tmp/hbase.id]:[hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/hbase.id] 2024-11-16T08:37:58,214 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:37:58,214 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T08:37:58,215 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
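In the entries immediately above, FSUtils first writes the cluster ID to .tmp/hbase.id and then moves it onto hbase.id, so readers never observe a partially written file. A minimal sketch of that write-then-rename pattern with the Hadoop FileSystem API (the paths here are illustrative, not the HBase implementation):

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteThenRename {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();   // fs.defaultFS would point at the mini DFS
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");  // hypothetical paths
    Path dst = new Path("/user/jenkins/test-data/hbase.id");

    // 1. Write the content to a temporary location and close it, flushing the block pipeline.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("b1328763-76b1-412a-bda2-b918c9fb8830".getBytes(StandardCharsets.UTF_8));
    }

    // 2. Rename onto the final path; an HDFS rename is atomic, so readers see either
    //    the old file or the complete new one, never a half-written hbase.id.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}
```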
2024-11-16T08:37:58,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:58,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44231-0x10142cb25cb0001, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:58,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741827_1003 (size=196) 2024-11-16T08:37:58,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741827_1003 (size=196) 2024-11-16T08:37:58,230 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T08:37:58,231 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T08:37:58,231 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:37:58,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741828_1004 (size=1189) 2024-11-16T08:37:58,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741828_1004 (size=1189) 2024-11-16T08:37:58,239 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store 2024-11-16T08:37:58,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741829_1005 (size=34) 2024-11-16T08:37:58,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741829_1005 (size=34) 2024-11-16T08:37:58,246 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:37:58,247 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T08:37:58,247 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:37:58,247 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:37:58,247 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T08:37:58,247 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:37:58,247 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
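The master:store descriptor logged above (an in-memory 'info' family with ROW_INDEX_V1 encoding and ROWCOL bloom filters, plus plain 'proc', 'rs' and 'state' families) is the kind of schema normally assembled through the descriptor builders. A rough sketch of building an equivalent descriptor with the public client API, using a hypothetical user-namespace table name since master:store itself is internal:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static void main(String[] args) {
    // Mirrors the 'info' family above: 3 versions, in-memory, 8 KB blocks, ROWCOL blooms, ROW_INDEX_V1 encoding.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .build();

    // 'proc', 'rs' and 'state' keep the defaults shown in the log (1 version, ROW blooms, 64 KB blocks).
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.of("proc");
    ColumnFamilyDescriptor rs = ColumnFamilyDescriptorBuilder.of("rs");
    ColumnFamilyDescriptor state = ColumnFamilyDescriptorBuilder.of("state");

    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo:store")) // hypothetical name
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .setColumnFamily(rs)
        .setColumnFamily(state)
        .build();

    System.out.println(td);
  }
}
```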
2024-11-16T08:37:58,247 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731746278247Disabling compacts and flushes for region at 1731746278247Disabling writes for close at 1731746278247Writing region close event to WAL at 1731746278247Closed at 1731746278247 2024-11-16T08:37:58,248 WARN [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/.initializing 2024-11-16T08:37:58,248 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/WALs/c27dd56784bd,37905,1731746277937 2024-11-16T08:37:58,251 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C37905%2C1731746277937, suffix=, logDir=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/WALs/c27dd56784bd,37905,1731746277937, archiveDir=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/oldWALs, maxLogs=10 2024-11-16T08:37:58,251 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C37905%2C1731746277937.1731746278251 2024-11-16T08:37:58,256 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/WALs/c27dd56784bd,37905,1731746277937/c27dd56784bd%2C37905%2C1731746277937.1731746278251 2024-11-16T08:37:58,258 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42707:42707),(127.0.0.1/127.0.0.1:44917:44917)] 2024-11-16T08:37:58,258 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T08:37:58,258 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:37:58,259 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:58,259 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:58,260 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:58,261 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T08:37:58,261 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:58,262 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:37:58,262 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:58,263 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T08:37:58,263 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:58,264 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:37:58,264 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:58,265 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T08:37:58,266 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:58,266 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:37:58,266 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:58,268 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T08:37:58,268 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:58,268 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:37:58,268 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:58,269 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:58,270 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:58,271 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:58,271 DEBUG [master/c27dd56784bd:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:58,272 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T08:37:58,273 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:37:58,275 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T08:37:58,276 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=822545, jitterRate=0.0459204763174057}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T08:37:58,276 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731746278259Initializing all the Stores at 1731746278260 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746278260Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746278260Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746278260Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746278260Cleaning up temporary data from old regions at 1731746278271 (+11 ms)Region opened successfully at 1731746278276 (+5 ms) 2024-11-16T08:37:58,277 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T08:37:58,280 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5845d978, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c27dd56784bd/172.17.0.3:0 2024-11-16T08:37:58,282 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T08:37:58,282 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T08:37:58,282 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T08:37:58,282 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T08:37:58,282 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T08:37:58,283 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T08:37:58,283 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T08:37:58,285 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T08:37:58,286 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T08:37:58,295 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T08:37:58,295 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T08:37:58,296 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T08:37:58,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:58,305 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T08:37:58,306 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T08:37:58,307 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T08:37:58,316 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T08:37:58,317 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T08:37:58,326 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T08:37:58,329 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T08:37:58,337 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T08:37:58,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44231-0x10142cb25cb0001, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T08:37:58,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44231-0x10142cb25cb0001, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:58,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T08:37:58,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:58,348 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c27dd56784bd,37905,1731746277937, sessionid=0x10142cb25cb0000, setting cluster-up flag (Was=false) 2024-11-16T08:37:58,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:58,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44231-0x10142cb25cb0001, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:58,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:58,400 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T08:37:58,402 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c27dd56784bd,37905,1731746277937 2024-11-16T08:37:58,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44231-0x10142cb25cb0001, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:58,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:58,453 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T08:37:58,455 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c27dd56784bd,37905,1731746277937 2024-11-16T08:37:58,457 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T08:37:58,459 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T08:37:58,459 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T08:37:58,460 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
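Note on the two Close-WAL-Writer-0 warnings above: RecoverLeaseFSUtils probes DFSClient.isFileClosed through reflection, so the underlying "Filesystem closed" IOException surfaces only as the cause of an InvocationTargetException whose own message is null, exactly as logged. A minimal, self-contained Java sketch (hypothetical class and names, not HBase code) showing why the wrapper's message prints as null while the real error sits one level down:

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveCauseDemo {                         // hypothetical demo class
        static class FakeClient {                              // stand-in for DFSClient
            public boolean isFileClosed(String path) throws IOException {
                throw new IOException("Filesystem closed");    // mirrors the logged cause
            }
        }

        public static void main(String[] args) throws Exception {
            Method m = FakeClient.class.getMethod("isFileClosed", String.class);
            try {
                m.invoke(new FakeClient(), "/some/wal");       // placeholder path
            } catch (InvocationTargetException e) {
                System.out.println("wrapper message: " + e.getMessage()); // prints null
                System.out.println("real cause     : " + e.getCause());   // the IOException
            }
        }
    }

Unwrapping with getCause() is what produces the nested "Caused by: java.io.IOException: Filesystem closed" frames in the trace above.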
2024-11-16T08:37:58,460 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c27dd56784bd,37905,1731746277937 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T08:37:58,462 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:37:58,462 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:37:58,462 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:37:58,462 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:37:58,462 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c27dd56784bd:0, corePoolSize=10, maxPoolSize=10 2024-11-16T08:37:58,462 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:58,462 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c27dd56784bd:0, corePoolSize=2, maxPoolSize=2 2024-11-16T08:37:58,463 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:58,464 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731746308464 2024-11-16T08:37:58,464 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T08:37:58,464 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T08:37:58,464 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T08:37:58,464 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T08:37:58,464 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T08:37:58,464 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T08:37:58,464 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:58,465 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:37:58,465 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T08:37:58,465 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T08:37:58,465 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T08:37:58,465 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T08:37:58,465 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T08:37:58,465 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T08:37:58,466 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746278465,5,FailOnTimeoutGroup] 2024-11-16T08:37:58,466 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746278466,5,FailOnTimeoutGroup] 2024-11-16T08:37:58,466 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:58,466 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T08:37:58,466 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:58,466 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
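The ChoreService entries above (LogsCleaner, HFileCleaner, ReplicationBarrierCleaner, SnapshotCleaner) each describe a periodic task by a period and a time unit. As a rough analogue only, not HBase's ScheduledChore/ChoreService API itself, a plain ScheduledExecutorService expresses the same "period=600000, unit=MILLISECONDS" idea:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreAnalogue {                               // hypothetical demo class
        public static void main(String[] args) {
            ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
            // Same shape as the LogsCleaner entry: run every 600000 ms.
            pool.scheduleAtFixedRate(
                    () -> System.out.println("cleaner pass"),  // the chore body
                    0, 600_000, TimeUnit.MILLISECONDS);        // initial delay, period, unit
        }
    }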
2024-11-16T08:37:58,466 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:58,466 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T08:37:58,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741831_1007 (size=1321) 2024-11-16T08:37:58,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741831_1007 (size=1321) 2024-11-16T08:37:58,473 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T08:37:58,473 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988 2024-11-16T08:37:58,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741832_1008 (size=32) 2024-11-16T08:37:58,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741832_1008 (size=32) 2024-11-16T08:37:58,481 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:37:58,483 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T08:37:58,484 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T08:37:58,485 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:58,485 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:37:58,485 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T08:37:58,487 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T08:37:58,487 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:58,487 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:37:58,487 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T08:37:58,489 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T08:37:58,489 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:58,489 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:37:58,489 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T08:37:58,491 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T08:37:58,491 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:58,491 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:37:58,491 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T08:37:58,492 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740 2024-11-16T08:37:58,492 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740 2024-11-16T08:37:58,494 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T08:37:58,494 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T08:37:58,494 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T08:37:58,495 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T08:37:58,497 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T08:37:58,497 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=833299, jitterRate=0.059594959020614624}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T08:37:58,498 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731746278481Initializing all the Stores at 1731746278482 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746278482Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746278482Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746278482Instantiating store for column family {NAME => 'table', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746278482Cleaning up temporary data from old regions at 1731746278494 (+12 ms)Region opened successfully at 1731746278498 (+4 ms) 2024-11-16T08:37:58,498 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T08:37:58,498 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T08:37:58,498 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T08:37:58,498 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T08:37:58,498 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T08:37:58,499 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T08:37:58,499 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731746278498Disabling compacts and flushes for region at 1731746278498Disabling writes for close at 1731746278498Writing region close event to WAL at 1731746278499 (+1 ms)Closed at 1731746278499 2024-11-16T08:37:58,500 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:37:58,500 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T08:37:58,500 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T08:37:58,502 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T08:37:58,503 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T08:37:58,544 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.HRegionServer(746): ClusterId : b1328763-76b1-412a-bda2-b918c9fb8830 2024-11-16T08:37:58,544 DEBUG [RS:0;c27dd56784bd:44231 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T08:37:58,556 DEBUG [RS:0;c27dd56784bd:44231 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T08:37:58,556 DEBUG [RS:0;c27dd56784bd:44231 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T08:37:58,570 DEBUG [RS:0;c27dd56784bd:44231 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T08:37:58,570 DEBUG [RS:0;c27dd56784bd:44231 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11d9be6e, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c27dd56784bd/172.17.0.3:0 2024-11-16T08:37:58,586 DEBUG [RS:0;c27dd56784bd:44231 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c27dd56784bd:44231 2024-11-16T08:37:58,586 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T08:37:58,586 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T08:37:58,586 DEBUG [RS:0;c27dd56784bd:44231 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-16T08:37:58,586 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.HRegionServer(2659): reportForDuty to master=c27dd56784bd,37905,1731746277937 with port=44231, startcode=1731746278118 2024-11-16T08:37:58,587 DEBUG [RS:0;c27dd56784bd:44231 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T08:37:58,589 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48249, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T08:37:58,589 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37905 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c27dd56784bd,44231,1731746278118 2024-11-16T08:37:58,589 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37905 {}] master.ServerManager(517): Registering regionserver=c27dd56784bd,44231,1731746278118 2024-11-16T08:37:58,591 DEBUG [RS:0;c27dd56784bd:44231 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988 2024-11-16T08:37:58,591 DEBUG [RS:0;c27dd56784bd:44231 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43867 2024-11-16T08:37:58,591 DEBUG [RS:0;c27dd56784bd:44231 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T08:37:58,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T08:37:58,602 DEBUG [RS:0;c27dd56784bd:44231 {}] zookeeper.ZKUtil(111): regionserver:44231-0x10142cb25cb0001, quorum=127.0.0.1:56514, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c27dd56784bd,44231,1731746278118 2024-11-16T08:37:58,602 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c27dd56784bd,44231,1731746278118] 2024-11-16T08:37:58,602 WARN [RS:0;c27dd56784bd:44231 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
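The registration sequence above (reportForDuty, "Set watcher on existing znode=/hbase/rs/...", "RegionServer ephemeral node created") rests on ZooKeeper ephemeral znodes plus watches. A generic sketch with the plain ZooKeeper client, not HBase's ZKUtil/ZKWatcher wrappers, using a placeholder quorum address and znode path:

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralRegistrationSketch {                 // hypothetical demo class
        public static void main(String[] args) throws Exception {
            // Placeholder quorum; the test above actually uses 127.0.0.1:56514.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000,
                    event -> System.out.println("watch fired: " + event));
            // An ephemeral node vanishes when the session dies, which is how the
            // master's RegionServerTracker notices a region server going away.
            String path = zk.create("/hbase/rs/demo-server",   // placeholder znode
                    new byte[0],
                    ZooDefs.Ids.OPEN_ACL_UNSAFE,
                    CreateMode.EPHEMERAL);
            // Re-arm a watch on the node, in the spirit of "Set watcher on existing znode".
            zk.exists(path, true);
            System.out.println("registered " + path);
            zk.close();
        }
    }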
2024-11-16T08:37:58,603 INFO [RS:0;c27dd56784bd:44231 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:37:58,603 DEBUG [RS:0;c27dd56784bd:44231 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/WALs/c27dd56784bd,44231,1731746278118 2024-11-16T08:37:58,619 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T08:37:58,621 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T08:37:58,621 INFO [RS:0;c27dd56784bd:44231 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T08:37:58,621 INFO [RS:0;c27dd56784bd:44231 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:58,628 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T08:37:58,629 INFO [RS:0;c27dd56784bd:44231 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T08:37:58,629 INFO [RS:0;c27dd56784bd:44231 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:58,630 DEBUG [RS:0;c27dd56784bd:44231 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:58,630 DEBUG [RS:0;c27dd56784bd:44231 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:58,630 DEBUG [RS:0;c27dd56784bd:44231 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:58,630 DEBUG [RS:0;c27dd56784bd:44231 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:58,630 DEBUG [RS:0;c27dd56784bd:44231 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:58,630 DEBUG [RS:0;c27dd56784bd:44231 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c27dd56784bd:0, corePoolSize=2, maxPoolSize=2 2024-11-16T08:37:58,630 DEBUG [RS:0;c27dd56784bd:44231 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:58,630 DEBUG [RS:0;c27dd56784bd:44231 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:58,630 DEBUG [RS:0;c27dd56784bd:44231 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:58,631 DEBUG [RS:0;c27dd56784bd:44231 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:58,631 DEBUG [RS:0;c27dd56784bd:44231 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:58,631 DEBUG [RS:0;c27dd56784bd:44231 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:37:58,631 DEBUG [RS:0;c27dd56784bd:44231 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c27dd56784bd:0, corePoolSize=3, maxPoolSize=3 2024-11-16T08:37:58,631 DEBUG [RS:0;c27dd56784bd:44231 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0, corePoolSize=3, maxPoolSize=3 2024-11-16T08:37:58,633 INFO [RS:0;c27dd56784bd:44231 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:58,633 INFO [RS:0;c27dd56784bd:44231 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:58,633 INFO [RS:0;c27dd56784bd:44231 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:58,633 INFO [RS:0;c27dd56784bd:44231 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:58,634 INFO [RS:0;c27dd56784bd:44231 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:58,634 INFO [RS:0;c27dd56784bd:44231 {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,44231,1731746278118-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T08:37:58,652 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T08:37:58,652 INFO [RS:0;c27dd56784bd:44231 {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,44231,1731746278118-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:58,653 INFO [RS:0;c27dd56784bd:44231 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:58,653 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.Replication(171): c27dd56784bd,44231,1731746278118 started 2024-11-16T08:37:58,653 WARN [c27dd56784bd:37905 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-16T08:37:58,672 INFO [RS:0;c27dd56784bd:44231 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
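Each "Starting executor service ... corePoolSize=N, maxPoolSize=N" line above describes a bounded worker pool. A hedged sketch of the same core/max sizing with a raw java.util.concurrent ThreadPoolExecutor (the pool name and 60-second keep-alive are assumptions, not values from the log; HBase's executor.ExecutorService is presumably backed by a pool along these lines):

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ExecutorPoolSketch {                          // hypothetical demo class
        public static void main(String[] args) {
            // Same sizing style as "corePoolSize=1, maxPoolSize=1" for RS_OPEN_REGION above.
            ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
                    1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
            openRegionPool.allowCoreThreadTimeOut(true);       // cf. allowCoreThreadTimeOut=true earlier
            openRegionPool.execute(() -> System.out.println("open-region task"));
            openRegionPool.shutdown();
        }
    }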
2024-11-16T08:37:58,672 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.HRegionServer(1482): Serving as c27dd56784bd,44231,1731746278118, RpcServer on c27dd56784bd/172.17.0.3:44231, sessionid=0x10142cb25cb0001 2024-11-16T08:37:58,672 DEBUG [RS:0;c27dd56784bd:44231 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T08:37:58,672 DEBUG [RS:0;c27dd56784bd:44231 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c27dd56784bd,44231,1731746278118 2024-11-16T08:37:58,673 DEBUG [RS:0;c27dd56784bd:44231 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c27dd56784bd,44231,1731746278118' 2024-11-16T08:37:58,673 DEBUG [RS:0;c27dd56784bd:44231 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T08:37:58,673 DEBUG [RS:0;c27dd56784bd:44231 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T08:37:58,674 DEBUG [RS:0;c27dd56784bd:44231 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T08:37:58,674 DEBUG [RS:0;c27dd56784bd:44231 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T08:37:58,674 DEBUG [RS:0;c27dd56784bd:44231 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c27dd56784bd,44231,1731746278118 2024-11-16T08:37:58,674 DEBUG [RS:0;c27dd56784bd:44231 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c27dd56784bd,44231,1731746278118' 2024-11-16T08:37:58,674 DEBUG [RS:0;c27dd56784bd:44231 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T08:37:58,675 DEBUG [RS:0;c27dd56784bd:44231 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T08:37:58,675 DEBUG [RS:0;c27dd56784bd:44231 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T08:37:58,675 INFO [RS:0;c27dd56784bd:44231 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T08:37:58,675 INFO [RS:0;c27dd56784bd:44231 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
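"Quota support disabled" above reflects the quota switch being off in this test's configuration. A small sketch, assuming the standard hbase.quota.enabled key, of how that switch would be set programmatically (taking effect only after the master and region servers restart with the new configuration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaConfigSketch {                           // hypothetical demo class
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // The default is false, which matches "Quota support disabled" above.
            conf.setBoolean("hbase.quota.enabled", true);
            System.out.println("hbase.quota.enabled = "
                    + conf.getBoolean("hbase.quota.enabled", false));
        }
    }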
2024-11-16T08:37:58,777 INFO [RS:0;c27dd56784bd:44231 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C44231%2C1731746278118, suffix=, logDir=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/WALs/c27dd56784bd,44231,1731746278118, archiveDir=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/oldWALs, maxLogs=32 2024-11-16T08:37:58,778 INFO [RS:0;c27dd56784bd:44231 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C44231%2C1731746278118.1731746278778 2024-11-16T08:37:58,801 INFO [RS:0;c27dd56784bd:44231 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/WALs/c27dd56784bd,44231,1731746278118/c27dd56784bd%2C44231%2C1731746278118.1731746278778 2024-11-16T08:37:58,803 DEBUG [RS:0;c27dd56784bd:44231 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42707:42707),(127.0.0.1/127.0.0.1:44917:44917)] 2024-11-16T08:37:58,903 DEBUG [c27dd56784bd:37905 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T08:37:58,904 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c27dd56784bd,44231,1731746278118 2024-11-16T08:37:58,909 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c27dd56784bd,44231,1731746278118, state=OPENING 2024-11-16T08:37:58,924 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T08:37:58,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:58,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44231-0x10142cb25cb0001, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:37:58,944 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:37:58,944 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T08:37:58,945 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c27dd56784bd,44231,1731746278118}] 2024-11-16T08:37:58,945 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:37:59,101 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T08:37:59,106 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51885, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T08:37:59,113 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T08:37:59,113 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:37:59,115 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C44231%2C1731746278118.meta, suffix=.meta, logDir=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/WALs/c27dd56784bd,44231,1731746278118, archiveDir=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/oldWALs, maxLogs=32 2024-11-16T08:37:59,116 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C44231%2C1731746278118.meta.1731746279116.meta 2024-11-16T08:37:59,141 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/WALs/c27dd56784bd,44231,1731746278118/c27dd56784bd%2C44231%2C1731746278118.meta.1731746279116.meta 2024-11-16T08:37:59,164 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42707:42707),(127.0.0.1/127.0.0.1:44917:44917)] 2024-11-16T08:37:59,181 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T08:37:59,181 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T08:37:59,181 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T08:37:59,182 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
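The two "WAL configuration" lines above (blocksize=256 MB, rollsize=128 MB, maxLogs=32, FSHLogProvider) are derived from the cluster Configuration. A rough sketch of the keys that usually produce those values follows; the key names are the standard HBase ones and the values are copied from the log lines, but whether this particular test sets them explicitly is an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.wal.provider", "filesystem");                           // selects FSHLogProvider, as logged
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);  // blocksize=256 MB
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);           // rollsize = 0.5 * blocksize = 128 MB
            conf.setInt("hbase.regionserver.maxlogs", 32);                          // maxLogs=32
            System.out.println("WAL provider: " + conf.get("hbase.wal.provider"));
        }
    }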
2024-11-16T08:37:59,182 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T08:37:59,182 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:37:59,182 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T08:37:59,182 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T08:37:59,183 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T08:37:59,184 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T08:37:59,184 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:59,185 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:37:59,185 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T08:37:59,186 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T08:37:59,186 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:59,187 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:37:59,187 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T08:37:59,189 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T08:37:59,189 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:59,189 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:37:59,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T08:37:59,191 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T08:37:59,191 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:59,192 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
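Each CompactionConfiguration(183) line above expands, per column family, the same set of store-compaction settings. The sketch below maps the logged numbers back to their usual configuration keys; the key names are the standard ones and the values are taken from the log, but treating them as explicitly set (rather than defaults) is an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.hstore.compaction.min", 3);                           // minFilesToCompact:3
            conf.setInt("hbase.hstore.compaction.max", 10);                          // maxFilesToCompact:10
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                    // ratio 1.200000
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);            // off-peak ratio 5.000000
            conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);    // minCompactSize:128 MB
            conf.setLong("hbase.hregion.majorcompaction", 604800000L);               // major period 604800000 ms (7 days)
            conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);             // major jitter 0.500000
            System.out.println("ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
        }
    }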
2024-11-16T08:37:59,192 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T08:37:59,193 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740 2024-11-16T08:37:59,194 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740 2024-11-16T08:37:59,196 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T08:37:59,196 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T08:37:59,196 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T08:37:59,199 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T08:37:59,200 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=849378, jitterRate=0.08004066348075867}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T08:37:59,200 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T08:37:59,201 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731746279182Writing region info on filesystem at 1731746279182Initializing all the Stores at 1731746279183 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746279183Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746279183Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746279183Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746279183Cleaning up temporary data from old regions at 1731746279196 (+13 ms)Running coprocessor post-open hooks at 1731746279200 (+4 ms)Region opened successfully at 1731746279200 2024-11-16T08:37:59,202 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731746279101 2024-11-16T08:37:59,204 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T08:37:59,204 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T08:37:59,205 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c27dd56784bd,44231,1731746278118 2024-11-16T08:37:59,207 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c27dd56784bd,44231,1731746278118, state=OPEN 2024-11-16T08:37:59,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44231-0x10142cb25cb0001, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T08:37:59,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T08:37:59,249 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:37:59,249 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c27dd56784bd,44231,1731746278118 2024-11-16T08:37:59,249 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:37:59,254 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T08:37:59,254 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c27dd56784bd,44231,1731746278118 in 304 msec 2024-11-16T08:37:59,258 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T08:37:59,258 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 754 msec 2024-11-16T08:37:59,260 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:37:59,260 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T08:37:59,261 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T08:37:59,262 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c27dd56784bd,44231,1731746278118, seqNum=-1] 2024-11-16T08:37:59,262 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T08:37:59,264 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34907, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T08:37:59,283 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 823 msec 2024-11-16T08:37:59,284 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731746279284, completionTime=-1 2024-11-16T08:37:59,284 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T08:37:59,284 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T08:37:59,288 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T08:37:59,288 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731746339288 2024-11-16T08:37:59,288 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731746399288 2024-11-16T08:37:59,288 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 3 msec 2024-11-16T08:37:59,288 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,37905,1731746277937-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:59,288 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,37905,1731746277937-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:59,289 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,37905,1731746277937-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:59,289 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c27dd56784bd:37905, period=300000, unit=MILLISECONDS is enabled. 
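InitMetaProcedure above logs that it will create the {NAME => 'default'} and {NAME => 'hbase'} namespaces. A small, hypothetical client-side check that those two built-in namespaces are visible once the master is up (connection setup is assumed, not taken from the test):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceCheckSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // After InitMetaProcedure finishes, both system namespaces should be listed.
                for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                    System.out.println("namespace: " + ns.getName()); // expect "default" and "hbase"
                }
            }
        }
    }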
2024-11-16T08:37:59,289 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:59,291 DEBUG [master/c27dd56784bd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T08:37:59,292 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:59,298 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.108sec 2024-11-16T08:37:59,298 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T08:37:59,298 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T08:37:59,298 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T08:37:59,298 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T08:37:59,298 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T08:37:59,298 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,37905,1731746277937-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T08:37:59,298 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,37905,1731746277937-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T08:37:59,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:59,302 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T08:37:59,302 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T08:37:59,302 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,37905,1731746277937-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:37:59,349 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62ff6bde, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:37:59,349 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c27dd56784bd,37905,-1 for getting cluster id 2024-11-16T08:37:59,349 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T08:37:59,356 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b1328763-76b1-412a-bda2-b918c9fb8830' 2024-11-16T08:37:59,356 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T08:37:59,356 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b1328763-76b1-412a-bda2-b918c9fb8830" 2024-11-16T08:37:59,357 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d35a37a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:37:59,357 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c27dd56784bd,37905,-1] 2024-11-16T08:37:59,357 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T08:37:59,357 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:37:59,359 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54732, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T08:37:59,360 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@550e9c9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:37:59,361 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T08:37:59,363 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c27dd56784bd,44231,1731746278118, seqNum=-1] 2024-11-16T08:37:59,363 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T08:37:59,365 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44248, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T08:37:59,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c27dd56784bd,37905,1731746277937 2024-11-16T08:37:59,368 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:37:59,370 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T08:37:59,371 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T08:37:59,372 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is c27dd56784bd,37905,1731746277937 2024-11-16T08:37:59,372 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@33bc5d59 2024-11-16T08:37:59,373 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T08:37:59,374 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54742, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T08:37:59,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T08:37:59,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
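The two TableDescriptorChecker warnings above fire because the test runs with a deliberately tiny region max file size (786432 bytes) and memstore flush size (8192 bytes) so that flushes and log rolls happen quickly. A hedged sketch of the kind of test configuration that produces exactly those warnings; the key names are the standard ones, and relaxing hbase.table.sanity.checks is an assumption about why the checks only warn.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallRegionTestConfSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.max.filesize", 768 * 1024);   // 786432 bytes -> MAX_FILESIZE warning
            conf.setLong("hbase.hregion.memstore.flush.size", 8192);  // 8 KB -> MEMSTORE_FLUSHSIZE warning
            // Assumption: sanity checks downgraded so table creation only warns instead of failing.
            conf.setBoolean("hbase.table.sanity.checks", false);
            System.out.println("max filesize = " + conf.getLong("hbase.hregion.max.filesize", -1));
        }
    }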
2024-11-16T08:37:59,375 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T08:37:59,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T08:37:59,378 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T08:37:59,378 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:37:59,378 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-16T08:37:59,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T08:37:59,380 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T08:37:59,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:37:59,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741835_1011 (size=405) 2024-11-16T08:37:59,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741835_1011 (size=405) 2024-11-16T08:37:59,418 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-16T08:37:59,419 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-16T08:37:59,419 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T08:37:59,419 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-16T08:37:59,793 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 57d13d30c22f0f7484ab8215a05b82e7, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988 2024-11-16T08:37:59,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741836_1012 (size=88) 2024-11-16T08:37:59,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741836_1012 (size=88) 2024-11-16T08:37:59,806 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:37:59,806 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 57d13d30c22f0f7484ab8215a05b82e7, disabling compactions & flushes 2024-11-16T08:37:59,806 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7. 2024-11-16T08:37:59,806 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7. 2024-11-16T08:37:59,806 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7. after waiting 0 ms 2024-11-16T08:37:59,806 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7. 2024-11-16T08:37:59,806 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7. 2024-11-16T08:37:59,806 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 57d13d30c22f0f7484ab8215a05b82e7: Waiting for close lock at 1731746279806Disabling compacts and flushes for region at 1731746279806Disabling writes for close at 1731746279806Writing region close event to WAL at 1731746279806Closed at 1731746279806 2024-11-16T08:37:59,809 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T08:37:59,809 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731746279809"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731746279809"}]},"ts":"1731746279809"} 2024-11-16T08:37:59,819 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
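The HMaster$4(2454) line above records a create-table request for 'TestLogRolling-testCompactionRecordDoesntBlockRolling' with a single 'info' family (VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 64 KB). A minimal client-side sketch of the kind of call that produces such a request, using the standard HBase Admin API; the connection setup is assumed and is not the test's own code.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                TableName name = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
                TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                        .setMaxVersions(1)                  // VERSIONS => '1'
                        .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
                        .setBlocksize(64 * 1024)            // BLOCKSIZE => 64 KB
                        .build())
                    .build();
                admin.createTable(desc); // drives the CreateTableProcedure seen as pid=4 above
            }
        }
    }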
2024-11-16T08:37:59,820 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T08:37:59,820 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731746279820"}]},"ts":"1731746279820"} 2024-11-16T08:37:59,822 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-16T08:37:59,823 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=57d13d30c22f0f7484ab8215a05b82e7, ASSIGN}] 2024-11-16T08:37:59,825 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=57d13d30c22f0f7484ab8215a05b82e7, ASSIGN 2024-11-16T08:37:59,826 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=57d13d30c22f0f7484ab8215a05b82e7, ASSIGN; state=OFFLINE, location=c27dd56784bd,44231,1731746278118; forceNewPlan=false, retain=false 2024-11-16T08:37:59,977 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=57d13d30c22f0f7484ab8215a05b82e7, regionState=OPENING, regionLocation=c27dd56784bd,44231,1731746278118 2024-11-16T08:37:59,981 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=57d13d30c22f0f7484ab8215a05b82e7, ASSIGN because future has completed 2024-11-16T08:37:59,982 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 57d13d30c22f0f7484ab8215a05b82e7, server=c27dd56784bd,44231,1731746278118}] 2024-11-16T08:38:00,139 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7. 
2024-11-16T08:38:00,139 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 57d13d30c22f0f7484ab8215a05b82e7, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7.', STARTKEY => '', ENDKEY => ''} 2024-11-16T08:38:00,139 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 57d13d30c22f0f7484ab8215a05b82e7 2024-11-16T08:38:00,140 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:38:00,140 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 57d13d30c22f0f7484ab8215a05b82e7 2024-11-16T08:38:00,140 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 57d13d30c22f0f7484ab8215a05b82e7 2024-11-16T08:38:00,141 INFO [StoreOpener-57d13d30c22f0f7484ab8215a05b82e7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 57d13d30c22f0f7484ab8215a05b82e7 2024-11-16T08:38:00,142 INFO [StoreOpener-57d13d30c22f0f7484ab8215a05b82e7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 57d13d30c22f0f7484ab8215a05b82e7 columnFamilyName info 2024-11-16T08:38:00,142 DEBUG [StoreOpener-57d13d30c22f0f7484ab8215a05b82e7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:38:00,143 INFO [StoreOpener-57d13d30c22f0f7484ab8215a05b82e7-1 {}] regionserver.HStore(327): Store=57d13d30c22f0f7484ab8215a05b82e7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:38:00,143 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 57d13d30c22f0f7484ab8215a05b82e7 2024-11-16T08:38:00,143 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7 2024-11-16T08:38:00,144 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7 2024-11-16T08:38:00,144 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 57d13d30c22f0f7484ab8215a05b82e7 2024-11-16T08:38:00,144 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 57d13d30c22f0f7484ab8215a05b82e7 2024-11-16T08:38:00,146 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 57d13d30c22f0f7484ab8215a05b82e7 2024-11-16T08:38:00,147 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T08:38:00,148 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 57d13d30c22f0f7484ab8215a05b82e7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=690900, jitterRate=-0.12147608399391174}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T08:38:00,148 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 57d13d30c22f0f7484ab8215a05b82e7 2024-11-16T08:38:00,148 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 57d13d30c22f0f7484ab8215a05b82e7: Running coprocessor pre-open hook at 1731746280140Writing region info on filesystem at 1731746280140Initializing all the Stores at 1731746280141 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746280141Cleaning up temporary data from old regions at 1731746280144 (+3 ms)Running coprocessor post-open hooks at 1731746280148 (+4 ms)Region opened successfully at 1731746280148 2024-11-16T08:38:00,149 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7., pid=6, masterSystemTime=1731746280135 2024-11-16T08:38:00,151 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7. 2024-11-16T08:38:00,152 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7. 2024-11-16T08:38:00,152 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=57d13d30c22f0f7484ab8215a05b82e7, regionState=OPEN, openSeqNum=2, regionLocation=c27dd56784bd,44231,1731746278118 2024-11-16T08:38:00,155 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 57d13d30c22f0f7484ab8215a05b82e7, server=c27dd56784bd,44231,1731746278118 because future has completed 2024-11-16T08:38:00,160 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T08:38:00,160 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 57d13d30c22f0f7484ab8215a05b82e7, server=c27dd56784bd,44231,1731746278118 in 176 msec 2024-11-16T08:38:00,163 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T08:38:00,163 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=57d13d30c22f0f7484ab8215a05b82e7, ASSIGN in 337 msec 2024-11-16T08:38:00,164 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T08:38:00,164 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731746280164"}]},"ts":"1731746280164"} 2024-11-16T08:38:00,166 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-16T08:38:00,167 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T08:38:00,170 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 793 msec 2024-11-16T08:38:00,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:00,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:01,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:01,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:02,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:02,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:03,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:03,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:04,182 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,183 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,183 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,183 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,206 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,206 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,206 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,206 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,206 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,207 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,212 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:04,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:04,715 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T08:38:04,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,717 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,718 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,718 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,743 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,743 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,743 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,747 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,748 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,748 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:04,758 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T08:38:04,758 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-16T08:38:05,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:05,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:06,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:06,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:07,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:07,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:08,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:08,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:09,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:09,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:09,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T08:38:09,413 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-16T08:38:09,413 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-16T08:38:09,418 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T08:38:09,419 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-16T08:38:09,419 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T08:38:09,419 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7. 
2024-11-16T08:38:09,423 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7., hostname=c27dd56784bd,44231,1731746278118, seqNum=2] 2024-11-16T08:38:09,430 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T08:38:09,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T08:38:09,436 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-16T08:38:09,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-16T08:38:09,438 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-16T08:38:09,439 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-16T08:38:09,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44231 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-16T08:38:09,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7. 
2024-11-16T08:38:09,600 INFO [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 57d13d30c22f0f7484ab8215a05b82e7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-16T08:38:09,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/.tmp/info/e22fb14846c94c0396495cc8198c6c80 is 1080, key is row0001/info:/1731746289424/Put/seqid=0 2024-11-16T08:38:09,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741837_1013 (size=6033) 2024-11-16T08:38:09,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741837_1013 (size=6033) 2024-11-16T08:38:09,635 INFO [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/.tmp/info/e22fb14846c94c0396495cc8198c6c80 2024-11-16T08:38:09,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/.tmp/info/e22fb14846c94c0396495cc8198c6c80 as hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/e22fb14846c94c0396495cc8198c6c80 2024-11-16T08:38:09,648 INFO [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/e22fb14846c94c0396495cc8198c6c80, entries=1, sequenceid=5, filesize=5.9 K 2024-11-16T08:38:09,649 INFO [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 57d13d30c22f0f7484ab8215a05b82e7 in 49ms, sequenceid=5, compaction requested=false 2024-11-16T08:38:09,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 57d13d30c22f0f7484ab8215a05b82e7: 2024-11-16T08:38:09,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7. 
2024-11-16T08:38:09,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-16T08:38:09,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-16T08:38:09,658 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-16T08:38:09,658 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 216 msec 2024-11-16T08:38:09,660 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 228 msec 2024-11-16T08:38:10,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:10,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:11,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:11,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:12,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:12,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:13,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:13,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:14,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:14,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:15,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:15,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:16,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:16,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 after 68068ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-16T08:38:16,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:16,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta after 68063ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor206.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-16T08:38:17,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:17,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:18,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:18,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:19,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:19,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-16T08:38:19,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-16T08:38:19,443 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-16T08:38:19,446 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T08:38:19,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T08:38:19,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-16T08:38:19,450 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-16T08:38:19,451 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-16T08:38:19,451 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-16T08:38:19,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44231 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-16T08:38:19,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7.
2024-11-16T08:38:19,605 INFO [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 57d13d30c22f0f7484ab8215a05b82e7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-16T08:38:19,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/.tmp/info/c8fd55fdb4014524b0b164791888fcbe is 1080, key is row0002/info:/1731746299444/Put/seqid=0
2024-11-16T08:38:19,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741838_1014 (size=6033)
2024-11-16T08:38:19,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741838_1014 (size=6033)
2024-11-16T08:38:19,615 INFO [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/.tmp/info/c8fd55fdb4014524b0b164791888fcbe
2024-11-16T08:38:19,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/.tmp/info/c8fd55fdb4014524b0b164791888fcbe as hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/c8fd55fdb4014524b0b164791888fcbe
2024-11-16T08:38:19,628 INFO [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/c8fd55fdb4014524b0b164791888fcbe, entries=1, sequenceid=9, filesize=5.9 K
2024-11-16T08:38:19,629 INFO [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 57d13d30c22f0f7484ab8215a05b82e7 in 24ms, sequenceid=9, compaction requested=false
2024-11-16T08:38:19,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 57d13d30c22f0f7484ab8215a05b82e7:
2024-11-16T08:38:19,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7.
2024-11-16T08:38:19,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-16T08:38:19,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-16T08:38:19,634 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-16T08:38:19,634 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 180 msec 2024-11-16T08:38:19,636 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 189 msec 2024-11-16T08:38:20,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:20,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:21,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:21,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:22,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:22,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:23,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:23,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:24,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:24,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:25,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:25,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:26,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:26,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:27,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:27,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:27,917 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T08:38:28,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:28,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:29,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:29,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-16T08:38:29,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-16T08:38:29,502 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-16T08:38:29,505 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C44231%2C1731746278118.1731746309505
2024-11-16T08:38:29,517 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T08:38:29,518 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T08:38:29,518 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T08:38:29,518 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T08:38:29,518 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T08:38:29,518 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/WALs/c27dd56784bd,44231,1731746278118/c27dd56784bd%2C44231%2C1731746278118.1731746278778 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/WALs/c27dd56784bd,44231,1731746278118/c27dd56784bd%2C44231%2C1731746278118.1731746309505
2024-11-16T08:38:29,519 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42707:42707),(127.0.0.1/127.0.0.1:44917:44917)]
2024-11-16T08:38:29,519 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/WALs/c27dd56784bd,44231,1731746278118/c27dd56784bd%2C44231%2C1731746278118.1731746278778 is not closed yet, will try archiving it next time
2024-11-16T08:38:29,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741833_1009 (size=5546)
2024-11-16T08:38:29,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741833_1009 (size=5546)
2024-11-16T08:38:29,520 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T08:38:29,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T08:38:29,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-16T08:38:29,522 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-16T08:38:29,523 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
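Note: the "Rolled WAL ... new WAL ..." record above shows the basic roll pattern: the current writer is shut down (the sync runners report "interrupted") and a fresh file named with the roll timestamp takes over for subsequent appends. The sketch below is a hypothetical, simplified illustration of that roll pattern in plain Java; it is not HBase's FSHLog/AbstractFSWAL implementation, and the file naming scheme is made up.

import java.io.BufferedWriter;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

// Hypothetical sketch of rolling a write-ahead log: close the old file, open a new one
// whose name carries the roll timestamp, and keep appending to the new file.
public class WalRollSketch {
    private final Path dir;
    private BufferedWriter writer;
    private Path current;
    private long entries;

    WalRollSketch(Path dir) throws IOException {
        this.dir = Files.createDirectories(dir);
        roll(); // open the first log file
    }

    void append(String record) throws IOException {
        writer.write(record);
        writer.newLine();
        entries++;
    }

    // Close the current file and start a new one, reporting what was rolled.
    void roll() throws IOException {
        if (writer != null) {
            writer.close();
            System.out.println("Rolled " + current + " with entries=" + entries);
        }
        current = dir.resolve("wal." + System.currentTimeMillis());
        writer = Files.newBufferedWriter(current);
        entries = 0;
    }

    public static void main(String[] args) throws IOException {
        WalRollSketch wal = new WalRollSketch(Path.of("wal-dir"));
        for (int i = 0; i < 8; i++) {
            wal.append("edit-" + i);
        }
        wal.roll(); // prints e.g. "Rolled wal-dir/wal.<timestamp> with entries=8"
        wal.writer.close();
    }
}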
2024-11-16T08:38:29,524 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-16T08:38:29,677 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44231 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-16T08:38:29,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7.
2024-11-16T08:38:29,678 INFO [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 57d13d30c22f0f7484ab8215a05b82e7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-16T08:38:29,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/.tmp/info/5923559a0f7442fc80bec516082fc874 is 1080, key is row0003/info:/1731746309504/Put/seqid=0
2024-11-16T08:38:29,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741840_1016 (size=6033)
2024-11-16T08:38:29,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741840_1016 (size=6033)
2024-11-16T08:38:29,702 INFO [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/.tmp/info/5923559a0f7442fc80bec516082fc874
2024-11-16T08:38:29,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/.tmp/info/5923559a0f7442fc80bec516082fc874 as hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/5923559a0f7442fc80bec516082fc874
2024-11-16T08:38:29,714 INFO [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/5923559a0f7442fc80bec516082fc874, entries=1, sequenceid=13, filesize=5.9 K
2024-11-16T08:38:29,715 INFO [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for
57d13d30c22f0f7484ab8215a05b82e7 in 38ms, sequenceid=13, compaction requested=true 2024-11-16T08:38:29,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 57d13d30c22f0f7484ab8215a05b82e7: 2024-11-16T08:38:29,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7. 2024-11-16T08:38:29,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-16T08:38:29,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-16T08:38:29,720 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-16T08:38:29,720 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 193 msec 2024-11-16T08:38:29,722 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 201 msec 2024-11-16T08:38:30,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:30,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:31,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:31,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:32,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:32,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:33,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:33,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:34,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:34,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:35,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:35,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:36,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:36,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:37,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:37,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:38,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:38,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:39,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:39,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-16T08:38:39,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-16T08:38:39,562 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-16T08:38:39,562 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-16T08:38:39,563 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-16T08:38:39,564 DEBUG [Time-limited test {}] regionserver.HStore(1541): 57d13d30c22f0f7484ab8215a05b82e7/info is initiating minor compaction (all files)
2024-11-16T08:38:39,564 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-16T08:38:39,564 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-16T08:38:39,564 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 57d13d30c22f0f7484ab8215a05b82e7/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7.
2024-11-16T08:38:39,564 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/e22fb14846c94c0396495cc8198c6c80, hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/c8fd55fdb4014524b0b164791888fcbe, hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/5923559a0f7442fc80bec516082fc874] into tmpdir=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/.tmp, totalSize=17.7 K
2024-11-16T08:38:39,565 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting e22fb14846c94c0396495cc8198c6c80, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731746289424
2024-11-16T08:38:39,565 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting c8fd55fdb4014524b0b164791888fcbe, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731746299444
2024-11-16T08:38:39,565 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5923559a0f7442fc80bec516082fc874, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731746309504
2024-11-16T08:38:39,577 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 57d13d30c22f0f7484ab8215a05b82e7#info#compaction#45 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-16T08:38:39,578 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/.tmp/info/7d19e06354154c0aa7ee9aecca3e07a8 is 1080, key is row0001/info:/1731746289424/Put/seqid=0
2024-11-16T08:38:39,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741841_1017 (size=8296)
2024-11-16T08:38:39,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741841_1017 (size=8296)
2024-11-16T08:38:39,590 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/.tmp/info/7d19e06354154c0aa7ee9aecca3e07a8 as hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/7d19e06354154c0aa7ee9aecca3e07a8
2024-11-16T08:38:39,598 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 57d13d30c22f0f7484ab8215a05b82e7/info of 57d13d30c22f0f7484ab8215a05b82e7 into 7d19e06354154c0aa7ee9aecca3e07a8(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-16T08:38:39,598 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 57d13d30c22f0f7484ab8215a05b82e7:
2024-11-16T08:38:39,605 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C44231%2C1731746278118.1731746319605
2024-11-16T08:38:39,611 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T08:38:39,611 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T08:38:39,612 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T08:38:39,612 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T08:38:39,612 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-16T08:38:39,612 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/WALs/c27dd56784bd,44231,1731746278118/c27dd56784bd%2C44231%2C1731746278118.1731746309505 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/WALs/c27dd56784bd,44231,1731746278118/c27dd56784bd%2C44231%2C1731746278118.1731746319605
2024-11-16T08:38:39,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741839_1015 (size=2520)
2024-11-16T08:38:39,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741839_1015 (size=2520)
2024-11-16T08:38:39,614 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42707:42707),(127.0.0.1/127.0.0.1:44917:44917)]
2024-11-16T08:38:39,614 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/WALs/c27dd56784bd,44231,1731746278118/c27dd56784bd%2C44231%2C1731746278118.1731746309505 is not closed yet, will try archiving it next time
2024-11-16T08:38:39,614 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/WALs/c27dd56784bd,44231,1731746278118/c27dd56784bd%2C44231%2C1731746278118.1731746278778 to hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/oldWALs/c27dd56784bd%2C44231%2C1731746278118.1731746278778
2024-11-16T08:38:39,615 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T08:38:39,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-16T08:38:39,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-16T08:38:39,618 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-16T08:38:39,619 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-16T08:38:39,619 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-16T08:38:39,772 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44231 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-16T08:38:39,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7.
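The flush now being executed on the region server was requested by the test client through the master: the HMaster entry above shows the RPC (Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling), which the master turns into FlushTableProcedure pid=13 with a FlushRegionProcedure subprocedure pid=14. For reference, the same request can be issued from any client through the public Admin API. This is only an illustrative sketch, not the test's code; the class name is made up and the connection/configuration handling is plain boilerplate, with the table name taken from the log above.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws IOException {
    // Stock client configuration; in a test this would point at the mini cluster.
    Configuration conf = HBaseConfiguration.create();
    TableName table =
        TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; the master drives this
      // through FlushTableProcedure/FlushRegionProcedure, as in the pid=13/pid=14
      // entries above.
      admin.flush(table);
      // Request a compaction of the table's store files (the 3-file minor
      // compaction earlier in this log was selected automatically once three
      // flushed files were eligible).
      admin.compact(table);
    }
  }
}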
2024-11-16T08:38:39,773 INFO [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 57d13d30c22f0f7484ab8215a05b82e7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-16T08:38:39,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/.tmp/info/2bcb3cfa12f74dbf9f91e5951197202c is 1080, key is row0000/info:/1731746319599/Put/seqid=0
2024-11-16T08:38:39,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741843_1019 (size=6033)
2024-11-16T08:38:39,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741843_1019 (size=6033)
2024-11-16T08:38:39,786 INFO [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/.tmp/info/2bcb3cfa12f74dbf9f91e5951197202c
2024-11-16T08:38:39,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/.tmp/info/2bcb3cfa12f74dbf9f91e5951197202c as hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/2bcb3cfa12f74dbf9f91e5951197202c
2024-11-16T08:38:39,799 INFO [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/2bcb3cfa12f74dbf9f91e5951197202c, entries=1, sequenceid=18, filesize=5.9 K
2024-11-16T08:38:39,800 INFO [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 57d13d30c22f0f7484ab8215a05b82e7 in 28ms, sequenceid=18, compaction requested=false
2024-11-16T08:38:39,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 57d13d30c22f0f7484ab8215a05b82e7:
2024-11-16T08:38:39,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7.
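The recurring WARN blocks that bracket this section ("Failed invocation for hdfs://localhost:34591/... / Caused by: java.io.IOException: Filesystem closed") evidently come from a Close-WAL-Writer thread still trying to close WAL files of an earlier mini-cluster instance (note the different NameNode port, 34591 versus the active 43867): RecoverLeaseFSUtils keeps polling HDFS to recover the lease on those old WALs, but its DFSClient has already been shut down, so every probe throws the same exception and the warning repeats roughly once per second. The sketch below shows only the general shape of such a lease-recovery polling loop, written against the public DistributedFileSystem API instead of the reflective calls visible in the stack traces; the class and method names, timeout, and pause values are all illustrative, not the actual RecoverLeaseFSUtils implementation.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecoverySketch {
  /**
   * Poll the NameNode until the WAL file is closed (lease recovered) or the
   * deadline passes. Each failed probe would correspond to one WARN entry in a
   * log like the one above; with an already-closed DFSClient the IOException is
   * always "Filesystem closed", so the loop can never succeed and keeps logging.
   */
  static boolean waitForLeaseRecovery(DistributedFileSystem dfs, Path wal,
                                      long timeoutMs, long pauseMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        // Ask the NameNode to start (or confirm) lease recovery; true means the
        // file is already closed and safe to read or archive.
        if (dfs.recoverLease(wal)) {
          return true;
        }
        // Cheaper follow-up probe; this is the call visible in the stack traces.
        if (dfs.isFileClosed(wal)) {
          return true;
        }
      } catch (IOException e) {
        // Swallow and retry; a real implementation would log the failure here,
        // which is where the repeated "Failed invocation for ..." warnings come from.
      }
      Thread.sleep(pauseMs);
    }
    return false;
  }
}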
2024-11-16T08:38:39,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-16T08:38:39,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-16T08:38:39,804 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-16T08:38:39,805 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 183 msec 2024-11-16T08:38:39,807 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 190 msec 2024-11-16T08:38:40,181 INFO [master/c27dd56784bd:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-16T08:38:40,181 INFO [master/c27dd56784bd:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-16T08:38:40,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:40,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:41,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:41,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:42,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:42,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:43,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:43,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:44,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:44,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:45,140 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 57d13d30c22f0f7484ab8215a05b82e7, had cached 0 bytes from a total of 14329 2024-11-16T08:38:45,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:45,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:46,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:46,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:47,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:47,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:48,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:48,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:49,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:49,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:49,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37905 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-16T08:38:49,703 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-16T08:38:49,707 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C44231%2C1731746278118.1731746329707 2024-11-16T08:38:49,718 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:38:49,718 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:38:49,718 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:38:49,718 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:38:49,718 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:38:49,718 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/WALs/c27dd56784bd,44231,1731746278118/c27dd56784bd%2C44231%2C1731746278118.1731746319605 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/WALs/c27dd56784bd,44231,1731746278118/c27dd56784bd%2C44231%2C1731746278118.1731746329707 2024-11-16T08:38:49,719 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42707:42707),(127.0.0.1/127.0.0.1:44917:44917)] 2024-11-16T08:38:49,720 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/WALs/c27dd56784bd,44231,1731746278118/c27dd56784bd%2C44231%2C1731746278118.1731746319605 is not closed yet, will try archiving it next time 2024-11-16T08:38:49,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T08:38:49,720 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/WALs/c27dd56784bd,44231,1731746278118/c27dd56784bd%2C44231%2C1731746278118.1731746309505 to hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/oldWALs/c27dd56784bd%2C44231%2C1731746278118.1731746309505 2024-11-16T08:38:49,720 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
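The repeated Close-WAL-Writer-0 warnings above all share one root cause: RecoverLeaseFSUtils keeps probing DistributedFileSystem.isFileClosed through reflection (hence the InvocationTargetException wrapper in each trace) against a DFSClient that has already been shut down, so every probe fails with "Filesystem closed" and the same two WAL paths alternate in the warnings roughly once per second. Below is a minimal sketch of the direct, non-reflective form of that probe; the class name is illustrative, the NameNode URI and WAL path are copied from the warnings above, and nothing else about the test harness is assumed beyond a Hadoop HDFS client on the classpath.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class FilesystemClosedSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // NameNode URI and WAL path taken verbatim from the WARN entries above.
            DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
                URI.create("hdfs://localhost:34591"), conf);
            Path wal = new Path("/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/"
                + "c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804");

            dfs.close();
            // After close(), DFSClient.checkOpen() rejects every request with
            // java.io.IOException: Filesystem closed -- the root cause that
            // RecoverLeaseFSUtils wraps in an InvocationTargetException once per retry.
            dfs.isFileClosed(wal);
        }
    }

In this run the probes appear to be retrying against a client that the test shutdown had already closed, which is why the identical trace recurs until the close thread stops retrying shortly before the flush procedure completes.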
2024-11-16T08:38:49,720 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:38:49,720 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:38:49,721 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:38:49,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741842_1018 (size=2026) 2024-11-16T08:38:49,721 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): 
Shutting down HBase Cluster 2024-11-16T08:38:49,721 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1798467448, stopped=false 2024-11-16T08:38:49,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741842_1018 (size=2026) 2024-11-16T08:38:49,721 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T08:38:49,724 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c27dd56784bd,37905,1731746277937 2024-11-16T08:38:49,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44231-0x10142cb25cb0001, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T08:38:49,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44231-0x10142cb25cb0001, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:38:49,744 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T08:38:49,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T08:38:49,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:38:49,744 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
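The shutdown sequence above (the rolled WAL, "Shutting down minicluster", the closed async connection, the ZooKeeper NodeDeleted on /hbase/running and "Cluster shutdown requested") is driven from the test's tearDown, which the call stacks show reaching HBaseTestingUtil.shutdownMiniCluster via AbstractTestLogRolling.tearDown. A minimal JUnit 4 sketch of that lifecycle follows; only shutdownMiniCluster and the tearDown hook are taken from the stacks above, while the class body and the startMiniCluster counterpart are assumptions about typical HBaseTestingUtil usage, not the actual test source.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
        // HBaseTestingUtil appears in the call stacks above; the field name is illustrative.
        private final HBaseTestingUtil util = new HBaseTestingUtil();

        @Before
        public void setUp() throws Exception {
            // Assumed counterpart to the shutdown seen in the log: brings up the
            // mini HDFS/ZooKeeper/HBase cluster the test runs against.
            util.startMiniCluster();
        }

        @Test
        public void exerciseLogRolling() throws Exception {
            // test body elided
        }

        @After
        public void tearDown() throws Exception {
            // Mirrors HBaseTestingUtil.shutdownMiniCluster in the stack above; this is
            // what produces "Shutting down minicluster", the AsyncConnection close,
            // the deletion of /hbase/running in ZooKeeper and "Cluster shutdown requested".
            util.shutdownMiniCluster();
        }
    }

Once /hbase/running is deleted, the region server watchers fire and the STOPPING/STOPPED messages that follow in the log are the expected continuation of this teardown.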
2024-11-16T08:38:49,744 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:38:49,744 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:38:49,744 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:44231-0x10142cb25cb0001, quorum=127.0.0.1:56514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:38:49,744 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:38:49,744 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c27dd56784bd,44231,1731746278118' ***** 2024-11-16T08:38:49,745 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T08:38:49,745 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T08:38:49,745 INFO [RS:0;c27dd56784bd:44231 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T08:38:49,745 INFO [RS:0;c27dd56784bd:44231 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T08:38:49,745 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.HRegionServer(3091): Received CLOSE for 57d13d30c22f0f7484ab8215a05b82e7 2024-11-16T08:38:49,745 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T08:38:49,745 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.HRegionServer(959): stopping server c27dd56784bd,44231,1731746278118 2024-11-16T08:38:49,745 INFO [RS:0;c27dd56784bd:44231 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T08:38:49,745 INFO [RS:0;c27dd56784bd:44231 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c27dd56784bd:44231. 2024-11-16T08:38:49,745 DEBUG [RS:0;c27dd56784bd:44231 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:38:49,745 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 57d13d30c22f0f7484ab8215a05b82e7, disabling compactions & flushes 2024-11-16T08:38:49,745 DEBUG [RS:0;c27dd56784bd:44231 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:38:49,746 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7. 2024-11-16T08:38:49,746 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T08:38:49,746 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T08:38:49,746 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-16T08:38:49,746 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7. 2024-11-16T08:38:49,746 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T08:38:49,746 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7. after waiting 0 ms 2024-11-16T08:38:49,746 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7. 2024-11-16T08:38:49,746 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-16T08:38:49,746 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 57d13d30c22f0f7484ab8215a05b82e7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-16T08:38:49,746 DEBUG [RS:0;c27dd56784bd:44231 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 57d13d30c22f0f7484ab8215a05b82e7=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7.} 2024-11-16T08:38:49,746 DEBUG [RS:0;c27dd56784bd:44231 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 57d13d30c22f0f7484ab8215a05b82e7 2024-11-16T08:38:49,746 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T08:38:49,746 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T08:38:49,746 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T08:38:49,746 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T08:38:49,746 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T08:38:49,746 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-16T08:38:49,756 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 
{event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/.tmp/info/8d4ff5ddb1554af096c5e6276eb054f4 is 1080, key is row0001/info:/1731746329705/Put/seqid=0 2024-11-16T08:38:49,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741845_1021 (size=6033) 2024-11-16T08:38:49,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741845_1021 (size=6033) 2024-11-16T08:38:49,761 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/.tmp/info/8d4ff5ddb1554af096c5e6276eb054f4 2024-11-16T08:38:49,763 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740/.tmp/info/0b23ec58b12e48f3b8d0c978e8a28999 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7./info:regioninfo/1731746280152/Put/seqid=0 2024-11-16T08:38:49,768 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/.tmp/info/8d4ff5ddb1554af096c5e6276eb054f4 as hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/8d4ff5ddb1554af096c5e6276eb054f4 2024-11-16T08:38:49,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741846_1022 (size=7308) 2024-11-16T08:38:49,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741846_1022 (size=7308) 2024-11-16T08:38:49,778 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/8d4ff5ddb1554af096c5e6276eb054f4, entries=1, sequenceid=22, filesize=5.9 K 2024-11-16T08:38:49,779 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740/.tmp/info/0b23ec58b12e48f3b8d0c978e8a28999 2024-11-16T08:38:49,779 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize 
~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 57d13d30c22f0f7484ab8215a05b82e7 in 33ms, sequenceid=22, compaction requested=true 2024-11-16T08:38:49,780 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/e22fb14846c94c0396495cc8198c6c80, hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/c8fd55fdb4014524b0b164791888fcbe, hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/5923559a0f7442fc80bec516082fc874] to archive 2024-11-16T08:38:49,781 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-16T08:38:49,782 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/e22fb14846c94c0396495cc8198c6c80 to hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/e22fb14846c94c0396495cc8198c6c80 2024-11-16T08:38:49,784 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/c8fd55fdb4014524b0b164791888fcbe to hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/c8fd55fdb4014524b0b164791888fcbe 2024-11-16T08:38:49,787 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/5923559a0f7442fc80bec516082fc874 to hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/info/5923559a0f7442fc80bec516082fc874 2024-11-16T08:38:49,787 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c27dd56784bd:37905 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-16T08:38:49,787 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [e22fb14846c94c0396495cc8198c6c80=6033, c8fd55fdb4014524b0b164791888fcbe=6033, 5923559a0f7442fc80bec516082fc874=6033] 2024-11-16T08:38:49,791 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/57d13d30c22f0f7484ab8215a05b82e7/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-16T08:38:49,792 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7. 2024-11-16T08:38:49,792 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 57d13d30c22f0f7484ab8215a05b82e7: Waiting for close lock at 1731746329745Running coprocessor pre-close hooks at 1731746329745Disabling compacts and flushes for region at 1731746329745Disabling writes for close at 1731746329746 (+1 ms)Obtaining lock to block concurrent updates at 1731746329746Preparing flush snapshotting stores in 57d13d30c22f0f7484ab8215a05b82e7 at 1731746329746Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731746329746Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7. at 1731746329752 (+6 ms)Flushing 57d13d30c22f0f7484ab8215a05b82e7/info: creating writer at 1731746329752Flushing 57d13d30c22f0f7484ab8215a05b82e7/info: appending metadata at 1731746329755 (+3 ms)Flushing 57d13d30c22f0f7484ab8215a05b82e7/info: closing flushed file at 1731746329755Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8882df5: reopening flushed file at 1731746329767 (+12 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 57d13d30c22f0f7484ab8215a05b82e7 in 33ms, sequenceid=22, compaction requested=true at 1731746329779 (+12 ms)Writing region close event to WAL at 1731746329788 (+9 ms)Running coprocessor post-close hooks at 1731746329792 (+4 ms)Closed at 1731746329792 2024-11-16T08:38:49,792 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731746279374.57d13d30c22f0f7484ab8215a05b82e7. 
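[Editor's sketch] The entries above trace the store flush path during region close: a new HFile is written under the region's .tmp directory, committed (renamed) into the info/ store directory, and the previously compacted store files are moved to archive/. The same write-to-.tmp / commit / archive path is what a client-triggered flush and major compaction exercise. A minimal, hypothetical sketch of driving that path through the public Admin API follows; it is not the actual AbstractTestLogRolling code, and only reuses the table name that appears in the log for illustration.

// Hypothetical sketch, not the actual test code: the public-API calls that
// produce "Flushing ...", "Committing ... as ..." and HFileArchiver entries
// like the ones above.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class FlushAndCompactSketch {
  static void flushThenCompact(Connection connection) throws Exception {
    TableName table =
        TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Admin admin = connection.getAdmin()) {
      admin.flush(table);        // memstore -> new HFile under .tmp, then committed into info/
      admin.majorCompact(table); // rewrites store files; replaced files are later archived
    }
  }
}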
2024-11-16T08:38:49,805 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740/.tmp/ns/f524cc37987f4e32a3f0b5a06348889a is 43, key is default/ns:d/1731746279268/Put/seqid=0 2024-11-16T08:38:49,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741847_1023 (size=5153) 2024-11-16T08:38:49,810 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740/.tmp/ns/f524cc37987f4e32a3f0b5a06348889a 2024-11-16T08:38:49,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741847_1023 (size=5153) 2024-11-16T08:38:49,830 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740/.tmp/table/d6e3ec4862894e43946de3a25a095181 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731746280164/Put/seqid=0 2024-11-16T08:38:49,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741848_1024 (size=5508) 2024-11-16T08:38:49,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741848_1024 (size=5508) 2024-11-16T08:38:49,835 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740/.tmp/table/d6e3ec4862894e43946de3a25a095181 2024-11-16T08:38:49,841 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740/.tmp/info/0b23ec58b12e48f3b8d0c978e8a28999 as hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740/info/0b23ec58b12e48f3b8d0c978e8a28999 2024-11-16T08:38:49,847 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740/info/0b23ec58b12e48f3b8d0c978e8a28999, entries=10, sequenceid=11, filesize=7.1 K 2024-11-16T08:38:49,848 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740/.tmp/ns/f524cc37987f4e32a3f0b5a06348889a as hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740/ns/f524cc37987f4e32a3f0b5a06348889a 2024-11-16T08:38:49,854 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740/ns/f524cc37987f4e32a3f0b5a06348889a, entries=2, sequenceid=11, filesize=5.0 K 2024-11-16T08:38:49,855 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740/.tmp/table/d6e3ec4862894e43946de3a25a095181 as hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740/table/d6e3ec4862894e43946de3a25a095181 2024-11-16T08:38:49,860 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740/table/d6e3ec4862894e43946de3a25a095181, entries=2, sequenceid=11, filesize=5.4 K 2024-11-16T08:38:49,861 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 115ms, sequenceid=11, compaction requested=false 2024-11-16T08:38:49,866 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-16T08:38:49,866 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T08:38:49,866 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T08:38:49,866 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731746329746Running coprocessor pre-close hooks at 1731746329746Disabling compacts and flushes for region at 1731746329746Disabling writes for close at 1731746329746Obtaining lock to block concurrent updates at 1731746329746Preparing flush snapshotting stores in 1588230740 at 1731746329746Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731746329746Flushing stores of hbase:meta,,1.1588230740 at 1731746329747 (+1 ms)Flushing 1588230740/info: creating writer at 1731746329747Flushing 1588230740/info: appending metadata at 1731746329763 (+16 ms)Flushing 1588230740/info: closing flushed file at 1731746329763Flushing 1588230740/ns: creating writer at 1731746329786 (+23 ms)Flushing 1588230740/ns: appending metadata at 1731746329805 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1731746329805Flushing 1588230740/table: creating writer at 1731746329815 (+10 ms)Flushing 1588230740/table: appending metadata at 1731746329829 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731746329830 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e0b70e2: reopening flushed file at 1731746329840 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33642a41: reopening flushed file at 1731746329847 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c96841: reopening flushed file at 1731746329854 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 115ms, sequenceid=11, compaction requested=false at 1731746329861 (+7 ms)Writing region close event to WAL at 1731746329862 (+1 ms)Running coprocessor post-close hooks at 1731746329866 (+4 ms)Closed at 1731746329866 2024-11-16T08:38:49,867 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T08:38:49,946 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.HRegionServer(976): stopping server c27dd56784bd,44231,1731746278118; all regions closed. 2024-11-16T08:38:49,947 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:38:49,947 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:38:49,947 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:38:49,947 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:38:49,947 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:38:49,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741834_1010 (size=3306) 2024-11-16T08:38:49,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741834_1010 (size=3306) 2024-11-16T08:38:49,952 DEBUG [RS:0;c27dd56784bd:44231 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/oldWALs 2024-11-16T08:38:49,952 INFO [RS:0;c27dd56784bd:44231 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c27dd56784bd%2C44231%2C1731746278118.meta:.meta(num 1731746279116) 2024-11-16T08:38:49,952 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:38:49,952 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:38:49,952 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:38:49,953 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:38:49,953 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:38:49,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741844_1020 (size=1252) 2024-11-16T08:38:49,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741844_1020 (size=1252) 2024-11-16T08:38:49,957 DEBUG [RS:0;c27dd56784bd:44231 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/oldWALs 2024-11-16T08:38:49,957 INFO [RS:0;c27dd56784bd:44231 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c27dd56784bd%2C44231%2C1731746278118:(num 1731746329707) 2024-11-16T08:38:49,957 DEBUG [RS:0;c27dd56784bd:44231 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:38:49,957 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T08:38:49,957 INFO [RS:0;c27dd56784bd:44231 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T08:38:49,958 INFO [RS:0;c27dd56784bd:44231 {}] hbase.ChoreService(370): Chore service for: regionserver/c27dd56784bd:0 had 
[ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-16T08:38:49,958 INFO [RS:0;c27dd56784bd:44231 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T08:38:49,958 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T08:38:49,958 INFO [RS:0;c27dd56784bd:44231 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44231 2024-11-16T08:38:49,965 INFO [RS:0;c27dd56784bd:44231 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T08:38:49,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44231-0x10142cb25cb0001, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c27dd56784bd,44231,1731746278118 2024-11-16T08:38:49,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T08:38:49,975 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c27dd56784bd,44231,1731746278118] 2024-11-16T08:38:49,986 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c27dd56784bd,44231,1731746278118 already deleted, retry=false 2024-11-16T08:38:49,986 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c27dd56784bd,44231,1731746278118 expired; onlineServers=0 2024-11-16T08:38:49,986 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c27dd56784bd,37905,1731746277937' ***** 2024-11-16T08:38:49,986 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T08:38:49,986 INFO [M:0;c27dd56784bd:37905 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T08:38:49,986 INFO [M:0;c27dd56784bd:37905 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T08:38:49,986 DEBUG [M:0;c27dd56784bd:37905 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T08:38:49,986 DEBUG [M:0;c27dd56784bd:37905 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T08:38:49,986 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
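[Editor's sketch] The shutdown sequence above (region server STOPPING, chores cancelled, RPC server stopped, the ephemeral znode under /hbase/rs deleted, master processing the expiration) is driven from the test's tearDown, visible in the call stack at the start of this excerpt as AbstractTestLogRolling.tearDown calling HBaseTestingUtil.shutdownMiniCluster. A minimal sketch of that kind of JUnit lifecycle, assuming the standard HBaseTestingUtil API rather than the actual AbstractTestLogRolling setup:

// Hypothetical sketch (not the actual AbstractTestLogRolling code): the JUnit
// hooks whose shutdown path produces the STOPPING/STOPPED entries above.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    util.startMiniCluster(); // mini ZK + mini DFS + one master + one region server
  }

  @After
  public void tearDown() throws Exception {
    // Appears in the call stack above via HBaseTestingUtil.shutdownMiniCluster:
    // stops the region server and master, then the mini DFS and ZK clusters.
    util.shutdownMiniCluster();
  }
}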
2024-11-16T08:38:49,986 DEBUG [master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746278465 {}] cleaner.HFileCleaner(306): Exit Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746278465,5,FailOnTimeoutGroup] 2024-11-16T08:38:49,986 DEBUG [master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746278466 {}] cleaner.HFileCleaner(306): Exit Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746278466,5,FailOnTimeoutGroup] 2024-11-16T08:38:49,986 INFO [M:0;c27dd56784bd:37905 {}] hbase.ChoreService(370): Chore service for: master/c27dd56784bd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T08:38:49,986 INFO [M:0;c27dd56784bd:37905 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T08:38:49,987 DEBUG [M:0;c27dd56784bd:37905 {}] master.HMaster(1795): Stopping service threads 2024-11-16T08:38:49,987 INFO [M:0;c27dd56784bd:37905 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T08:38:49,987 INFO [M:0;c27dd56784bd:37905 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T08:38:49,987 INFO [M:0;c27dd56784bd:37905 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T08:38:49,987 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T08:38:49,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T08:38:49,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:38:49,996 DEBUG [M:0;c27dd56784bd:37905 {}] zookeeper.ZKUtil(347): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T08:38:49,996 WARN [M:0;c27dd56784bd:37905 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T08:38:49,997 INFO [M:0;c27dd56784bd:37905 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/.lastflushedseqids 2024-11-16T08:38:50,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741849_1025 (size=130) 2024-11-16T08:38:50,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741849_1025 (size=130) 2024-11-16T08:38:50,003 INFO [M:0;c27dd56784bd:37905 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T08:38:50,003 INFO [M:0;c27dd56784bd:37905 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T08:38:50,003 DEBUG [M:0;c27dd56784bd:37905 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T08:38:50,003 INFO [M:0;c27dd56784bd:37905 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:38:50,003 DEBUG [M:0;c27dd56784bd:37905 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:38:50,003 DEBUG [M:0;c27dd56784bd:37905 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T08:38:50,003 DEBUG [M:0;c27dd56784bd:37905 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:38:50,003 INFO [M:0;c27dd56784bd:37905 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.57 KB heapSize=54.98 KB 2024-11-16T08:38:50,019 DEBUG [M:0;c27dd56784bd:37905 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/15a3931f18d44a8ab59883699a8cd98d is 82, key is hbase:meta,,1/info:regioninfo/1731746279205/Put/seqid=0 2024-11-16T08:38:50,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741850_1026 (size=5672) 2024-11-16T08:38:50,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741850_1026 (size=5672) 2024-11-16T08:38:50,025 INFO [M:0;c27dd56784bd:37905 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/15a3931f18d44a8ab59883699a8cd98d 2024-11-16T08:38:50,046 DEBUG [M:0;c27dd56784bd:37905 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9e9d1b0449724459985e03b279130016 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731746280169/Put/seqid=0 2024-11-16T08:38:50,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741851_1027 (size=7821) 2024-11-16T08:38:50,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741851_1027 (size=7821) 2024-11-16T08:38:50,052 INFO [M:0;c27dd56784bd:37905 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.97 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9e9d1b0449724459985e03b279130016 2024-11-16T08:38:50,056 INFO [M:0;c27dd56784bd:37905 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9e9d1b0449724459985e03b279130016 2024-11-16T08:38:50,073 DEBUG [M:0;c27dd56784bd:37905 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b25621c52bd34d8c817664f11199f7a7 is 69, key is c27dd56784bd,44231,1731746278118/rs:state/1731746278590/Put/seqid=0 
2024-11-16T08:38:50,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44231-0x10142cb25cb0001, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:38:50,075 INFO [RS:0;c27dd56784bd:44231 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T08:38:50,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44231-0x10142cb25cb0001, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:38:50,075 INFO [RS:0;c27dd56784bd:44231 {}] regionserver.HRegionServer(1031): Exiting; stopping=c27dd56784bd,44231,1731746278118; zookeeper connection closed. 2024-11-16T08:38:50,076 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@63e69b4b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@63e69b4b 2024-11-16T08:38:50,076 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T08:38:50,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741852_1028 (size=5156) 2024-11-16T08:38:50,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741852_1028 (size=5156) 2024-11-16T08:38:50,077 INFO [M:0;c27dd56784bd:37905 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b25621c52bd34d8c817664f11199f7a7 2024-11-16T08:38:50,098 DEBUG [M:0;c27dd56784bd:37905 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3279c49fde0c44aaabc91184e68fd9e6 is 52, key is load_balancer_on/state:d/1731746279369/Put/seqid=0 2024-11-16T08:38:50,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741853_1029 (size=5056) 2024-11-16T08:38:50,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741853_1029 (size=5056) 2024-11-16T08:38:50,106 INFO [M:0;c27dd56784bd:37905 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3279c49fde0c44aaabc91184e68fd9e6 2024-11-16T08:38:50,113 DEBUG [M:0;c27dd56784bd:37905 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/15a3931f18d44a8ab59883699a8cd98d as hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/15a3931f18d44a8ab59883699a8cd98d 2024-11-16T08:38:50,118 INFO [M:0;c27dd56784bd:37905 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/15a3931f18d44a8ab59883699a8cd98d, entries=8, sequenceid=121, filesize=5.5 K 2024-11-16T08:38:50,119 DEBUG [M:0;c27dd56784bd:37905 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9e9d1b0449724459985e03b279130016 as hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9e9d1b0449724459985e03b279130016 2024-11-16T08:38:50,124 INFO [M:0;c27dd56784bd:37905 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9e9d1b0449724459985e03b279130016 2024-11-16T08:38:50,125 INFO [M:0;c27dd56784bd:37905 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9e9d1b0449724459985e03b279130016, entries=14, sequenceid=121, filesize=7.6 K 2024-11-16T08:38:50,126 DEBUG [M:0;c27dd56784bd:37905 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b25621c52bd34d8c817664f11199f7a7 as hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b25621c52bd34d8c817664f11199f7a7 2024-11-16T08:38:50,131 INFO [M:0;c27dd56784bd:37905 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b25621c52bd34d8c817664f11199f7a7, entries=1, sequenceid=121, filesize=5.0 K 2024-11-16T08:38:50,132 DEBUG [M:0;c27dd56784bd:37905 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3279c49fde0c44aaabc91184e68fd9e6 as hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3279c49fde0c44aaabc91184e68fd9e6 2024-11-16T08:38:50,138 INFO [M:0;c27dd56784bd:37905 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43867/user/jenkins/test-data/e3861ce6-7724-d6cf-548a-5c38c311e988/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3279c49fde0c44aaabc91184e68fd9e6, entries=1, sequenceid=121, filesize=4.9 K 2024-11-16T08:38:50,139 INFO [M:0;c27dd56784bd:37905 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.57 KB/44620, heapSize ~54.92 KB/56240, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 136ms, sequenceid=121, compaction requested=false 2024-11-16T08:38:50,144 INFO [M:0;c27dd56784bd:37905 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T08:38:50,144 DEBUG [M:0;c27dd56784bd:37905 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731746330003Disabling compacts and flushes for region at 1731746330003Disabling writes for close at 1731746330003Obtaining lock to block concurrent updates at 1731746330003Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731746330003Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44620, getHeapSize=56240, getOffHeapSize=0, getCellsCount=140 at 1731746330004 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731746330004Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731746330004Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731746330019 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731746330019Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731746330030 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731746330045 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731746330045Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731746330057 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731746330072 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731746330072Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731746330082 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731746330097 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731746330097Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6fce30d1: reopening flushed file at 1731746330112 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4168cc03: reopening flushed file at 1731746330118 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27576cbd: reopening flushed file at 1731746330125 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@72567667: reopening flushed file at 1731746330131 (+6 ms)Finished flush of dataSize ~43.57 KB/44620, heapSize ~54.92 KB/56240, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 136ms, sequenceid=121, compaction requested=false at 1731746330139 (+8 ms)Writing region close event to WAL at 1731746330144 (+5 ms)Closed at 1731746330144 2024-11-16T08:38:50,144 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:38:50,144 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:38:50,145 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:38:50,145 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:38:50,145 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:38:50,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45569 is added to blk_1073741830_1006 (size=53017) 2024-11-16T08:38:50,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45583 is added to blk_1073741830_1006 (size=53017) 2024-11-16T08:38:50,147 INFO [M:0;c27dd56784bd:37905 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-16T08:38:50,147 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T08:38:50,148 INFO [M:0;c27dd56784bd:37905 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37905 2024-11-16T08:38:50,148 INFO [M:0;c27dd56784bd:37905 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T08:38:50,254 INFO [M:0;c27dd56784bd:37905 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T08:38:50,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:38:50,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37905-0x10142cb25cb0000, quorum=127.0.0.1:56514, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:38:50,258 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c734161{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:38:50,258 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75e789f0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:38:50,259 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:38:50,259 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@14a6d451{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:38:50,259 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@34534d9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/hadoop.log.dir/,STOPPED} 2024-11-16T08:38:50,261 WARN [BP-1093759444-172.17.0.3-1731746275460 heartbeating to localhost/127.0.0.1:43867 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T08:38:50,261 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T08:38:50,261 WARN [BP-1093759444-172.17.0.3-1731746275460 heartbeating to localhost/127.0.0.1:43867 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1093759444-172.17.0.3-1731746275460 (Datanode Uuid 2d71fedb-c708-435b-906a-527cc165243a) service to localhost/127.0.0.1:43867 2024-11-16T08:38:50,261 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T08:38:50,262 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/cluster_7e9b31b8-5397-c3ee-d6d9-3d012629e910/data/data3/current/BP-1093759444-172.17.0.3-1731746275460 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:38:50,262 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/cluster_7e9b31b8-5397-c3ee-d6d9-3d012629e910/data/data4/current/BP-1093759444-172.17.0.3-1731746275460 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:38:50,262 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T08:38:50,265 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@40527499{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:38:50,266 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@c1ed8d4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:38:50,266 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:38:50,266 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c13156e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:38:50,266 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4faae08f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/hadoop.log.dir/,STOPPED} 2024-11-16T08:38:50,268 WARN [BP-1093759444-172.17.0.3-1731746275460 heartbeating to localhost/127.0.0.1:43867 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T08:38:50,268 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T08:38:50,268 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T08:38:50,268 WARN [BP-1093759444-172.17.0.3-1731746275460 heartbeating to localhost/127.0.0.1:43867 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1093759444-172.17.0.3-1731746275460 (Datanode Uuid 5e969dde-d51a-4ea6-a6f7-633a38482fcd) service to localhost/127.0.0.1:43867 2024-11-16T08:38:50,269 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/cluster_7e9b31b8-5397-c3ee-d6d9-3d012629e910/data/data1/current/BP-1093759444-172.17.0.3-1731746275460 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:38:50,269 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/cluster_7e9b31b8-5397-c3ee-d6d9-3d012629e910/data/data2/current/BP-1093759444-172.17.0.3-1731746275460 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:38:50,269 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T08:38:50,275 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43498b11{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T08:38:50,275 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1367dc96{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:38:50,275 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:38:50,275 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@190ad9e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:38:50,276 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20f59884{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/hadoop.log.dir/,STOPPED} 2024-11-16T08:38:50,281 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T08:38:50,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T08:38:50,307 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=208 (was 181) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:43867 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43867 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43867 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43867 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43867 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:43867 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: regionserver/c27dd56784bd:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:43867 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:43867 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43867 java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=296 (was 441), ProcessCount=11 (was 11), AvailableMemoryMB=1723 (was 2105)
2024-11-16T08:38:50,316 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=208, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=296, ProcessCount=11, AvailableMemoryMB=1723
2024-11-16T08:38:50,316 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-16T08:38:50,316 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/hadoop.log.dir so I do NOT create it in target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3
2024-11-16T08:38:50,316 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/720049a6-f171-eb6d-6216-5f494fabd7cb/hadoop.tmp.dir so I do NOT create it in target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3
2024-11-16T08:38:50,316 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/cluster_d29cf617-63ba-fed4-6030-6a3e47b4a9f0, deleteOnExit=true
2024-11-16T08:38:50,316 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-16T08:38:50,316 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/test.cache.data in system properties and HBase conf
2024-11-16T08:38:50,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/hadoop.tmp.dir in system properties and HBase conf
2024-11-16T08:38:50,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/hadoop.log.dir in system properties and HBase conf
2024-11-16T08:38:50,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-16T08:38:50,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T08:38:50,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T08:38:50,317 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-16T08:38:50,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T08:38:50,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T08:38:50,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T08:38:50,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T08:38:50,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T08:38:50,317 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T08:38:50,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T08:38:50,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T08:38:50,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T08:38:50,318 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/nfs.dump.dir in system properties and HBase conf 2024-11-16T08:38:50,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/java.io.tmpdir in system properties and HBase conf 2024-11-16T08:38:50,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T08:38:50,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T08:38:50,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T08:38:50,331 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T08:38:50,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:50,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:50,636 INFO [regionserver/c27dd56784bd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T08:38:50,679 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:38:50,685 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:38:50,686 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:38:50,686 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:38:50,686 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T08:38:50,687 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:38:50,687 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7193a060{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:38:50,687 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7f1304aa{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:38:50,793 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b6783f5{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/java.io.tmpdir/jetty-localhost-44543-hadoop-hdfs-3_4_1-tests_jar-_-any-8683841787511883359/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T08:38:50,794 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@44a25975{HTTP/1.1, (http/1.1)}{localhost:44543} 2024-11-16T08:38:50,794 INFO [Time-limited test {}] server.Server(415): Started @249030ms 2024-11-16T08:38:50,806 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T08:38:51,061 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:38:51,064 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:38:51,086 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:38:51,086 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:38:51,087 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T08:38:51,087 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23e1642c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:38:51,087 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@601b78f7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:38:51,191 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@64358886{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/java.io.tmpdir/jetty-localhost-43905-hadoop-hdfs-3_4_1-tests_jar-_-any-18408138919451185261/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:38:51,191 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@26a9d62d{HTTP/1.1, (http/1.1)}{localhost:43905} 2024-11-16T08:38:51,191 INFO [Time-limited test {}] server.Server(415): Started @249427ms 2024-11-16T08:38:51,192 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:38:51,231 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:38:51,235 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:38:51,236 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:38:51,236 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:38:51,236 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-16T08:38:51,237 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47bcda8c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:38:51,237 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f4aa33e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:38:51,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:51,346 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@20408ea5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/java.io.tmpdir/jetty-localhost-45415-hadoop-hdfs-3_4_1-tests_jar-_-any-10740370788414466287/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:38:51,347 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2e2ab763{HTTP/1.1, (http/1.1)}{localhost:45415} 2024-11-16T08:38:51,347 INFO [Time-limited test {}] server.Server(415): Started @249583ms 2024-11-16T08:38:51,348 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:38:51,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:52,340 WARN [Thread-1954 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/cluster_d29cf617-63ba-fed4-6030-6a3e47b4a9f0/data/data1/current/BP-1154009425-172.17.0.3-1731746330336/current, will proceed with Du for space computation calculation, 2024-11-16T08:38:52,340 WARN [Thread-1955 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/cluster_d29cf617-63ba-fed4-6030-6a3e47b4a9f0/data/data2/current/BP-1154009425-172.17.0.3-1731746330336/current, will proceed with Du for space computation calculation, 2024-11-16T08:38:52,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:52,360 WARN [Thread-1918 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-16T08:38:52,362 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x66dcf3521f04a13 with lease ID 0x989bb43256ff4e3b: Processing first storage report for DS-8075f97f-8613-46c7-a8e9-dd30f506458b from datanode DatanodeRegistration(127.0.0.1:43469, datanodeUuid=e4b9af89-3b71-409a-adbe-3debfbd4f60f, infoPort=41793, infoSecurePort=0, ipcPort=35855, storageInfo=lv=-57;cid=testClusterID;nsid=1138263494;c=1731746330336) 2024-11-16T08:38:52,362 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x66dcf3521f04a13 with lease ID 0x989bb43256ff4e3b: from storage DS-8075f97f-8613-46c7-a8e9-dd30f506458b node DatanodeRegistration(127.0.0.1:43469, datanodeUuid=e4b9af89-3b71-409a-adbe-3debfbd4f60f, infoPort=41793, infoSecurePort=0, ipcPort=35855, storageInfo=lv=-57;cid=testClusterID;nsid=1138263494;c=1731746330336), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T08:38:52,362 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x66dcf3521f04a13 with lease ID 0x989bb43256ff4e3b: Processing first storage report for DS-ad26f693-bfe0-4eb0-8217-f08c20646ceb from datanode DatanodeRegistration(127.0.0.1:43469, datanodeUuid=e4b9af89-3b71-409a-adbe-3debfbd4f60f, infoPort=41793, infoSecurePort=0, ipcPort=35855, storageInfo=lv=-57;cid=testClusterID;nsid=1138263494;c=1731746330336) 2024-11-16T08:38:52,362 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x66dcf3521f04a13 with lease ID 0x989bb43256ff4e3b: from storage DS-ad26f693-bfe0-4eb0-8217-f08c20646ceb node DatanodeRegistration(127.0.0.1:43469, datanodeUuid=e4b9af89-3b71-409a-adbe-3debfbd4f60f, infoPort=41793, infoSecurePort=0, ipcPort=35855, storageInfo=lv=-57;cid=testClusterID;nsid=1138263494;c=1731746330336), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:38:52,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:52,501 WARN [Thread-1966 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/cluster_d29cf617-63ba-fed4-6030-6a3e47b4a9f0/data/data4/current/BP-1154009425-172.17.0.3-1731746330336/current, will proceed with Du for space computation calculation, 2024-11-16T08:38:52,501 WARN [Thread-1965 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/cluster_d29cf617-63ba-fed4-6030-6a3e47b4a9f0/data/data3/current/BP-1154009425-172.17.0.3-1731746330336/current, will proceed with Du for space computation calculation, 2024-11-16T08:38:52,517 WARN [Thread-1941 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T08:38:52,519 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x39661c2177473429 with lease ID 0x989bb43256ff4e3c: Processing first storage report for DS-e14c7b96-90d1-414d-bded-3fd7b2b573e9 from datanode DatanodeRegistration(127.0.0.1:42791, datanodeUuid=8622081f-7e6d-4622-8803-3a1e0d008227, infoPort=42645, infoSecurePort=0, ipcPort=43455, storageInfo=lv=-57;cid=testClusterID;nsid=1138263494;c=1731746330336) 2024-11-16T08:38:52,519 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x39661c2177473429 with lease ID 0x989bb43256ff4e3c: from storage DS-e14c7b96-90d1-414d-bded-3fd7b2b573e9 node DatanodeRegistration(127.0.0.1:42791, datanodeUuid=8622081f-7e6d-4622-8803-3a1e0d008227, infoPort=42645, infoSecurePort=0, ipcPort=43455, storageInfo=lv=-57;cid=testClusterID;nsid=1138263494;c=1731746330336), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:38:52,519 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x39661c2177473429 with lease ID 0x989bb43256ff4e3c: Processing first storage report for DS-fb470aac-b00b-49a2-a622-b71b96412047 from datanode DatanodeRegistration(127.0.0.1:42791, datanodeUuid=8622081f-7e6d-4622-8803-3a1e0d008227, infoPort=42645, infoSecurePort=0, ipcPort=43455, storageInfo=lv=-57;cid=testClusterID;nsid=1138263494;c=1731746330336) 2024-11-16T08:38:52,519 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x39661c2177473429 with lease ID 0x989bb43256ff4e3c: from storage DS-fb470aac-b00b-49a2-a622-b71b96412047 node DatanodeRegistration(127.0.0.1:42791, datanodeUuid=8622081f-7e6d-4622-8803-3a1e0d008227, infoPort=42645, infoSecurePort=0, ipcPort=43455, 
storageInfo=lv=-57;cid=testClusterID;nsid=1138263494;c=1731746330336), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-16T08:38:52,577 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3
2024-11-16T08:38:52,582 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/cluster_d29cf617-63ba-fed4-6030-6a3e47b4a9f0/zookeeper_0, clientPort=62779, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/cluster_d29cf617-63ba-fed4-6030-6a3e47b4a9f0/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/cluster_d29cf617-63ba-fed4-6030-6a3e47b4a9f0/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-16T08:38:52,583 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62779
2024-11-16T08:38:52,583 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-16T08:38:52,585 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-16T08:38:52,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741825_1001 (size=7)
2024-11-16T08:38:52,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741825_1001 (size=7)
2024-11-16T08:38:52,595 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4 with version=8
2024-11-16T08:38:52,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/hbase-staging
2024-11-16T08:38:52,597 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c27dd56784bd:0 server-side Connection retries=45
2024-11-16T08:38:52,597 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-16T08:38:52,597 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-16T08:38:52,597 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-16T08:38:52,597 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:38:52,597 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T08:38:52,597 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T08:38:52,597 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T08:38:52,598 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41263 2024-11-16T08:38:52,599 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41263 connecting to ZooKeeper ensemble=127.0.0.1:62779 2024-11-16T08:38:52,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:412630x0, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T08:38:52,680 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41263-0x10142cbfb4f0000 connected 2024-11-16T08:38:52,765 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:38:52,767 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:38:52,770 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:38:52,770 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4, hbase.cluster.distributed=false 2024-11-16T08:38:52,772 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T08:38:52,774 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41263 2024-11-16T08:38:52,774 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41263 2024-11-16T08:38:52,775 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41263 2024-11-16T08:38:52,775 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41263 2024-11-16T08:38:52,775 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41263 2024-11-16T08:38:52,793 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c27dd56784bd:0 server-side Connection retries=45 2024-11-16T08:38:52,793 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:38:52,793 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T08:38:52,793 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T08:38:52,793 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:38:52,793 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T08:38:52,793 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T08:38:52,793 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T08:38:52,794 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:39489 2024-11-16T08:38:52,795 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39489 connecting to ZooKeeper ensemble=127.0.0.1:62779 2024-11-16T08:38:52,796 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:38:52,797 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:38:52,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:394890x0, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T08:38:52,807 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:394890x0, quorum=127.0.0.1:62779, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:38:52,807 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39489-0x10142cbfb4f0001 connected 2024-11-16T08:38:52,808 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T08:38:52,808 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T08:38:52,809 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39489-0x10142cbfb4f0001, quorum=127.0.0.1:62779, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T08:38:52,810 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39489-0x10142cbfb4f0001, quorum=127.0.0.1:62779, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T08:38:52,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, 
numCallQueues=1, port=39489 2024-11-16T08:38:52,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39489 2024-11-16T08:38:52,811 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39489 2024-11-16T08:38:52,811 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39489 2024-11-16T08:38:52,811 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39489 2024-11-16T08:38:52,823 DEBUG [M:0;c27dd56784bd:41263 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c27dd56784bd:41263 2024-11-16T08:38:52,824 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c27dd56784bd,41263,1731746332596 2024-11-16T08:38:52,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39489-0x10142cbfb4f0001, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:38:52,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:38:52,828 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c27dd56784bd,41263,1731746332596 2024-11-16T08:38:52,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:38:52,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39489-0x10142cbfb4f0001, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T08:38:52,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39489-0x10142cbfb4f0001, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:38:52,839 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T08:38:52,839 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c27dd56784bd,41263,1731746332596 from backup master directory 2024-11-16T08:38:52,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c27dd56784bd,41263,1731746332596 2024-11-16T08:38:52,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39489-0x10142cbfb4f0001, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-11-16T08:38:52,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:38:52,849 WARN [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T08:38:52,849 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c27dd56784bd,41263,1731746332596 2024-11-16T08:38:52,854 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/hbase.id] with ID: efdac43f-3a8b-48ca-a411-006cac0eb2fd 2024-11-16T08:38:52,854 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/.tmp/hbase.id 2024-11-16T08:38:52,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741826_1002 (size=42) 2024-11-16T08:38:52,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741826_1002 (size=42) 2024-11-16T08:38:52,863 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/.tmp/hbase.id]:[hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/hbase.id] 2024-11-16T08:38:52,874 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:38:52,874 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T08:38:52,876 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-16T08:38:52,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39489-0x10142cbfb4f0001, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:38:52,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:38:52,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741827_1003 (size=196) 2024-11-16T08:38:52,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741827_1003 (size=196) 2024-11-16T08:38:52,887 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T08:38:52,888 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T08:38:52,888 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:38:52,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741828_1004 (size=1189) 2024-11-16T08:38:52,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741828_1004 (size=1189) 2024-11-16T08:38:52,896 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store 2024-11-16T08:38:52,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741829_1005 (size=34) 2024-11-16T08:38:52,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741829_1005 (size=34) 2024-11-16T08:38:52,903 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:38:52,903 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T08:38:52,903 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:38:52,903 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:38:52,903 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T08:38:52,903 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:38:52,903 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T08:38:52,903 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731746332903Disabling compacts and flushes for region at 1731746332903Disabling writes for close at 1731746332903Writing region close event to WAL at 1731746332903Closed at 1731746332903 2024-11-16T08:38:52,904 WARN [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/.initializing 2024-11-16T08:38:52,904 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/WALs/c27dd56784bd,41263,1731746332596 2024-11-16T08:38:52,906 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C41263%2C1731746332596, suffix=, logDir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/WALs/c27dd56784bd,41263,1731746332596, archiveDir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/oldWALs, maxLogs=10 2024-11-16T08:38:52,907 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C41263%2C1731746332596.1731746332907 2024-11-16T08:38:52,912 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/WALs/c27dd56784bd,41263,1731746332596/c27dd56784bd%2C41263%2C1731746332596.1731746332907 2024-11-16T08:38:52,916 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41793:41793),(127.0.0.1/127.0.0.1:42645:42645)] 2024-11-16T08:38:52,920 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T08:38:52,920 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:38:52,921 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:38:52,921 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:38:52,922 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:38:52,923 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T08:38:52,924 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:38:52,924 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:38:52,924 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:38:52,926 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T08:38:52,926 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:38:52,926 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:38:52,927 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:38:52,928 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T08:38:52,928 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:38:52,928 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:38:52,929 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:38:52,930 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T08:38:52,930 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:38:52,931 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:38:52,931 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:38:52,931 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:38:52,932 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:38:52,933 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:38:52,933 DEBUG [master/c27dd56784bd:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:38:52,934 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T08:38:52,935 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:38:52,937 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T08:38:52,937 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=718505, jitterRate=-0.08637471497058868}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T08:38:52,938 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731746332921Initializing all the Stores at 1731746332922 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746332922Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746332922Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746332922Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746332922Cleaning up temporary data from old regions at 1731746332933 (+11 ms)Region opened successfully at 1731746332938 (+5 ms) 2024-11-16T08:38:52,938 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T08:38:52,941 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73e6bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c27dd56784bd/172.17.0.3:0 2024-11-16T08:38:52,942 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T08:38:52,942 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T08:38:52,942 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T08:38:52,942 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T08:38:52,943 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T08:38:52,943 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T08:38:52,943 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T08:38:52,945 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T08:38:52,946 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T08:38:52,954 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T08:38:52,955 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T08:38:52,956 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T08:38:52,965 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T08:38:52,965 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T08:38:52,966 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T08:38:52,975 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T08:38:52,976 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T08:38:52,986 DEBUG 
[master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T08:38:52,988 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T08:38:52,996 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T08:38:53,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39489-0x10142cbfb4f0001, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T08:38:53,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39489-0x10142cbfb4f0001, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:38:53,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T08:38:53,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:38:53,007 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c27dd56784bd,41263,1731746332596, sessionid=0x10142cbfb4f0000, setting cluster-up flag (Was=false) 2024-11-16T08:38:53,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:38:53,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39489-0x10142cbfb4f0001, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:38:53,060 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T08:38:53,061 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c27dd56784bd,41263,1731746332596 2024-11-16T08:38:53,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39489-0x10142cbfb4f0001, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:38:53,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:38:53,112 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T08:38:53,113 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c27dd56784bd,41263,1731746332596 2024-11-16T08:38:53,114 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T08:38:53,116 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T08:38:53,116 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T08:38:53,116 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T08:38:53,116 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c27dd56784bd,41263,1731746332596 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T08:38:53,118 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:38:53,118 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:38:53,118 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:38:53,118 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:38:53,118 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c27dd56784bd:0, corePoolSize=10, maxPoolSize=10 2024-11-16T08:38:53,118 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:38:53,118 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c27dd56784bd:0, corePoolSize=2, maxPoolSize=2 2024-11-16T08:38:53,118 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c27dd56784bd:0, corePoolSize=1, 
maxPoolSize=1 2024-11-16T08:38:53,120 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:38:53,120 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T08:38:53,121 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:38:53,121 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T08:38:53,126 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731746363126 2024-11-16T08:38:53,127 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T08:38:53,127 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T08:38:53,127 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T08:38:53,127 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T08:38:53,127 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T08:38:53,127 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T08:38:53,127 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T08:38:53,127 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T08:38:53,127 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T08:38:53,127 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T08:38:53,128 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T08:38:53,128 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T08:38:53,128 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746333128,5,FailOnTimeoutGroup] 2024-11-16T08:38:53,128 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746333128,5,FailOnTimeoutGroup] 2024-11-16T08:38:53,128 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T08:38:53,128 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T08:38:53,128 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T08:38:53,128 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-16T08:38:53,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741831_1007 (size=1321) 2024-11-16T08:38:53,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741831_1007 (size=1321) 2024-11-16T08:38:53,131 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T08:38:53,131 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4 2024-11-16T08:38:53,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741832_1008 (size=32) 2024-11-16T08:38:53,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741832_1008 (size=32) 2024-11-16T08:38:53,137 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:38:53,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T08:38:53,140 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T08:38:53,140 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:38:53,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:38:53,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T08:38:53,142 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T08:38:53,142 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:38:53,143 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:38:53,143 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T08:38:53,145 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T08:38:53,145 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:38:53,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:38:53,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T08:38:53,148 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T08:38:53,148 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:38:53,148 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:38:53,148 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T08:38:53,149 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740 2024-11-16T08:38:53,150 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740 2024-11-16T08:38:53,151 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T08:38:53,151 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T08:38:53,152 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-16T08:38:53,153 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T08:38:53,155 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T08:38:53,156 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=800304, jitterRate=0.017639756202697754}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T08:38:53,156 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731746333137Initializing all the Stores at 1731746333138 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746333138Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746333138Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746333138Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746333138Cleaning up temporary data from old regions at 1731746333151 (+13 ms)Region opened successfully at 1731746333156 (+5 ms) 2024-11-16T08:38:53,156 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T08:38:53,156 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T08:38:53,156 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T08:38:53,157 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T08:38:53,157 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T08:38:53,157 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T08:38:53,157 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731746333156Disabling compacts and flushes for region at 1731746333156Disabling writes for close at 1731746333157 (+1 ms)Writing 
region close event to WAL at 1731746333157Closed at 1731746333157 2024-11-16T08:38:53,158 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:38:53,158 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T08:38:53,158 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T08:38:53,160 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T08:38:53,161 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T08:38:53,213 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.HRegionServer(746): ClusterId : efdac43f-3a8b-48ca-a411-006cac0eb2fd 2024-11-16T08:38:53,213 DEBUG [RS:0;c27dd56784bd:39489 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T08:38:53,218 DEBUG [RS:0;c27dd56784bd:39489 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T08:38:53,218 DEBUG [RS:0;c27dd56784bd:39489 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T08:38:53,229 DEBUG [RS:0;c27dd56784bd:39489 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T08:38:53,229 DEBUG [RS:0;c27dd56784bd:39489 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7df34046, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c27dd56784bd/172.17.0.3:0 2024-11-16T08:38:53,243 DEBUG [RS:0;c27dd56784bd:39489 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c27dd56784bd:39489 2024-11-16T08:38:53,244 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T08:38:53,244 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T08:38:53,244 DEBUG [RS:0;c27dd56784bd:39489 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-16T08:38:53,244 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.HRegionServer(2659): reportForDuty to master=c27dd56784bd,41263,1731746332596 with port=39489, startcode=1731746332792 2024-11-16T08:38:53,245 DEBUG [RS:0;c27dd56784bd:39489 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T08:38:53,246 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47769, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T08:38:53,247 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41263 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c27dd56784bd,39489,1731746332792 2024-11-16T08:38:53,247 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41263 {}] master.ServerManager(517): Registering regionserver=c27dd56784bd,39489,1731746332792 2024-11-16T08:38:53,248 DEBUG [RS:0;c27dd56784bd:39489 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4 2024-11-16T08:38:53,249 DEBUG [RS:0;c27dd56784bd:39489 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33067 2024-11-16T08:38:53,249 DEBUG [RS:0;c27dd56784bd:39489 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T08:38:53,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T08:38:53,260 DEBUG [RS:0;c27dd56784bd:39489 {}] zookeeper.ZKUtil(111): regionserver:39489-0x10142cbfb4f0001, quorum=127.0.0.1:62779, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c27dd56784bd,39489,1731746332792 2024-11-16T08:38:53,260 WARN [RS:0;c27dd56784bd:39489 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T08:38:53,260 INFO [RS:0;c27dd56784bd:39489 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:38:53,260 DEBUG [RS:0;c27dd56784bd:39489 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/WALs/c27dd56784bd,39489,1731746332792 2024-11-16T08:38:53,260 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c27dd56784bd,39489,1731746332792] 2024-11-16T08:38:53,263 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T08:38:53,265 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T08:38:53,265 INFO [RS:0;c27dd56784bd:39489 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T08:38:53,265 INFO [RS:0;c27dd56784bd:39489 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
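The PressureAwareCompactionThroughputController line above reports a higher bound of 100 MB/s, a lower bound of 50 MB/s, and a 60 s tuning period. A hedged sketch of how such bounds are typically configured follows; the two key names are assumptions based on that controller's documented settings, and CompactionThroughputSketch is an illustrative class name only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Assumed key names for the pressure-aware compaction throughput bounds.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // 50 MB/s

    System.out.println("higher bound (bytes/s) = "
        + conf.getLong("hbase.hstore.compaction.throughput.higher.bound", -1L));
  }
}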
2024-11-16T08:38:53,265 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T08:38:53,266 INFO [RS:0;c27dd56784bd:39489 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T08:38:53,266 INFO [RS:0;c27dd56784bd:39489 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T08:38:53,266 DEBUG [RS:0;c27dd56784bd:39489 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:38:53,266 DEBUG [RS:0;c27dd56784bd:39489 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:38:53,266 DEBUG [RS:0;c27dd56784bd:39489 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:38:53,266 DEBUG [RS:0;c27dd56784bd:39489 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:38:53,266 DEBUG [RS:0;c27dd56784bd:39489 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:38:53,266 DEBUG [RS:0;c27dd56784bd:39489 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c27dd56784bd:0, corePoolSize=2, maxPoolSize=2 2024-11-16T08:38:53,266 DEBUG [RS:0;c27dd56784bd:39489 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:38:53,267 DEBUG [RS:0;c27dd56784bd:39489 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:38:53,267 DEBUG [RS:0;c27dd56784bd:39489 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:38:53,267 DEBUG [RS:0;c27dd56784bd:39489 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:38:53,267 DEBUG [RS:0;c27dd56784bd:39489 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:38:53,267 DEBUG [RS:0;c27dd56784bd:39489 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:38:53,267 DEBUG [RS:0;c27dd56784bd:39489 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c27dd56784bd:0, corePoolSize=3, maxPoolSize=3 2024-11-16T08:38:53,267 DEBUG [RS:0;c27dd56784bd:39489 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0, corePoolSize=3, maxPoolSize=3 2024-11-16T08:38:53,267 INFO [RS:0;c27dd56784bd:39489 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-16T08:38:53,267 INFO [RS:0;c27dd56784bd:39489 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T08:38:53,267 INFO [RS:0;c27dd56784bd:39489 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:38:53,267 INFO [RS:0;c27dd56784bd:39489 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T08:38:53,267 INFO [RS:0;c27dd56784bd:39489 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T08:38:53,267 INFO [RS:0;c27dd56784bd:39489 {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,39489,1731746332792-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T08:38:53,283 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T08:38:53,283 INFO [RS:0;c27dd56784bd:39489 {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,39489,1731746332792-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:38:53,283 INFO [RS:0;c27dd56784bd:39489 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:38:53,283 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.Replication(171): c27dd56784bd,39489,1731746332792 started 2024-11-16T08:38:53,299 INFO [RS:0;c27dd56784bd:39489 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:38:53,299 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.HRegionServer(1482): Serving as c27dd56784bd,39489,1731746332792, RpcServer on c27dd56784bd/172.17.0.3:39489, sessionid=0x10142cbfb4f0001 2024-11-16T08:38:53,299 DEBUG [RS:0;c27dd56784bd:39489 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T08:38:53,299 DEBUG [RS:0;c27dd56784bd:39489 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c27dd56784bd,39489,1731746332792 2024-11-16T08:38:53,299 DEBUG [RS:0;c27dd56784bd:39489 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c27dd56784bd,39489,1731746332792' 2024-11-16T08:38:53,299 DEBUG [RS:0;c27dd56784bd:39489 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T08:38:53,299 DEBUG [RS:0;c27dd56784bd:39489 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T08:38:53,300 DEBUG [RS:0;c27dd56784bd:39489 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T08:38:53,300 DEBUG [RS:0;c27dd56784bd:39489 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T08:38:53,300 DEBUG [RS:0;c27dd56784bd:39489 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c27dd56784bd,39489,1731746332792 2024-11-16T08:38:53,300 DEBUG [RS:0;c27dd56784bd:39489 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c27dd56784bd,39489,1731746332792' 2024-11-16T08:38:53,300 DEBUG [RS:0;c27dd56784bd:39489 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T08:38:53,300 DEBUG 
[RS:0;c27dd56784bd:39489 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T08:38:53,300 DEBUG [RS:0;c27dd56784bd:39489 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T08:38:53,300 INFO [RS:0;c27dd56784bd:39489 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T08:38:53,300 INFO [RS:0;c27dd56784bd:39489 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-16T08:38:53,311 WARN [c27dd56784bd:41263 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-16T08:38:53,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:53,402 INFO [RS:0;c27dd56784bd:39489 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C39489%2C1731746332792, suffix=, logDir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/WALs/c27dd56784bd,39489,1731746332792, archiveDir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/oldWALs, maxLogs=32 2024-11-16T08:38:53,403 INFO [RS:0;c27dd56784bd:39489 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C39489%2C1731746332792.1731746333402 2024-11-16T08:38:53,408 INFO [RS:0;c27dd56784bd:39489 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/WALs/c27dd56784bd,39489,1731746332792/c27dd56784bd%2C39489%2C1731746332792.1731746333402 2024-11-16T08:38:53,409 DEBUG [RS:0;c27dd56784bd:39489 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41793:41793),(127.0.0.1/127.0.0.1:42645:42645)] 2024-11-16T08:38:53,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:38:53,561 DEBUG [c27dd56784bd:41263 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T08:38:53,562 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c27dd56784bd,39489,1731746332792 2024-11-16T08:38:53,563 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c27dd56784bd,39489,1731746332792, state=OPENING 2024-11-16T08:38:53,575 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T08:38:53,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39489-0x10142cbfb4f0001, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:38:53,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:38:53,586 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:38:53,586 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T08:38:53,587 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:38:53,587 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c27dd56784bd,39489,1731746332792}] 2024-11-16T08:38:53,740 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T08:38:53,742 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53147, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T08:38:53,746 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T08:38:53,746 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:38:53,748 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C39489%2C1731746332792.meta, suffix=.meta, logDir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/WALs/c27dd56784bd,39489,1731746332792, archiveDir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/oldWALs, maxLogs=32 2024-11-16T08:38:53,749 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C39489%2C1731746332792.meta.1731746333748.meta 2024-11-16T08:38:53,754 INFO 
[RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/WALs/c27dd56784bd,39489,1731746332792/c27dd56784bd%2C39489%2C1731746332792.meta.1731746333748.meta 2024-11-16T08:38:53,755 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41793:41793),(127.0.0.1/127.0.0.1:42645:42645)] 2024-11-16T08:38:53,756 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T08:38:53,756 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T08:38:53,756 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T08:38:53,756 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-16T08:38:53,756 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T08:38:53,756 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:38:53,756 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T08:38:53,756 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T08:38:53,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T08:38:53,764 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T08:38:53,765 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:38:53,765 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:38:53,765 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T08:38:53,766 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T08:38:53,766 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:38:53,767 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:38:53,767 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T08:38:53,767 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T08:38:53,767 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:38:53,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:38:53,768 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T08:38:53,768 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T08:38:53,769 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:38:53,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:38:53,769 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T08:38:53,770 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740 2024-11-16T08:38:53,771 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740 2024-11-16T08:38:53,772 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T08:38:53,772 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T08:38:53,772 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
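The FlushLargeStoresPolicy(65) line above falls back to memStoreFlushSize divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta table descriptor. That key is table-level metadata, so a sketch of setting it on a hypothetical user table via TableDescriptorBuilder follows; the table name "example_table", the "info" family, and the 16 MB value are examples chosen to match the fallback above, not values from this log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class PerFamilyFlushBoundSketch {
  public static void main(String[] args) {
    // Hypothetical table; the key name is the one the log reports as missing on hbase:meta.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  String.valueOf(16L * 1024 * 1024)) // 16 MB, matching the computed fallback above
        .build();

    System.out.println(td);
  }
}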
2024-11-16T08:38:53,773 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T08:38:53,774 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=784553, jitterRate=-0.00238935649394989}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T08:38:53,774 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T08:38:53,775 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731746333756Writing region info on filesystem at 1731746333757 (+1 ms)Initializing all the Stores at 1731746333757Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746333757Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746333763 (+6 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746333763Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746333763Cleaning up temporary data from old regions at 1731746333772 (+9 ms)Running coprocessor post-open hooks at 1731746333774 (+2 ms)Region opened successfully at 1731746333775 (+1 ms) 2024-11-16T08:38:53,776 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731746333740 2024-11-16T08:38:53,778 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T08:38:53,778 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T08:38:53,779 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=c27dd56784bd,39489,1731746332792 2024-11-16T08:38:53,780 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c27dd56784bd,39489,1731746332792, state=OPEN 2024-11-16T08:38:53,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39489-0x10142cbfb4f0001, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T08:38:53,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T08:38:53,838 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c27dd56784bd,39489,1731746332792 2024-11-16T08:38:53,838 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:38:53,838 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:38:53,843 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T08:38:53,843 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c27dd56784bd,39489,1731746332792 in 251 msec 2024-11-16T08:38:53,847 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T08:38:53,847 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 685 msec 2024-11-16T08:38:53,848 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:38:53,848 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T08:38:53,849 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T08:38:53,849 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c27dd56784bd,39489,1731746332792, seqNum=-1] 2024-11-16T08:38:53,849 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T08:38:53,850 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49765, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T08:38:53,857 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 739 msec 2024-11-16T08:38:53,857 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731746333857, completionTime=-1 2024-11-16T08:38:53,857 INFO 
[master/c27dd56784bd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T08:38:53,857 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T08:38:53,859 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T08:38:53,859 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731746393859 2024-11-16T08:38:53,859 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731746453859 2024-11-16T08:38:53,859 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-16T08:38:53,859 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,41263,1731746332596-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:38:53,860 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,41263,1731746332596-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:38:53,860 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,41263,1731746332596-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:38:53,860 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c27dd56784bd:41263, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:38:53,860 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T08:38:53,860 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T08:38:53,862 DEBUG [master/c27dd56784bd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T08:38:53,864 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.014sec 2024-11-16T08:38:53,864 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T08:38:53,864 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T08:38:53,864 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T08:38:53,864 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
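At this point the master has finished waiting on its single region server and reports that initialization completed in about one second. From a client, the same state can be observed through ClusterMetrics; a small sketch follows, assuming a reachable cluster configured via the usual hbase-site.xml / ZooKeeper quorum settings (ClusterStatusSketch is an illustrative class name).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterStatusSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      // Mirrors the master log above: one active master, one live region server.
      System.out.println("active master: " + metrics.getMasterName());
      System.out.println("live servers : " + metrics.getLiveServerMetrics().keySet());
    }
  }
}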
2024-11-16T08:38:53,864 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T08:38:53,864 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,41263,1731746332596-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T08:38:53,864 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,41263,1731746332596-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T08:38:53,867 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T08:38:53,867 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T08:38:53,867 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,41263,1731746332596-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:38:53,913 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56251406, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:38:53,913 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c27dd56784bd,41263,-1 for getting cluster id 2024-11-16T08:38:53,914 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T08:38:53,915 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'efdac43f-3a8b-48ca-a411-006cac0eb2fd' 2024-11-16T08:38:53,915 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T08:38:53,915 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "efdac43f-3a8b-48ca-a411-006cac0eb2fd" 2024-11-16T08:38:53,916 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56182010, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:38:53,916 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c27dd56784bd,41263,-1] 2024-11-16T08:38:53,916 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T08:38:53,916 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:38:53,917 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42276, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T08:38:53,918 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b5b2092, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:38:53,918 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T08:38:53,919 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c27dd56784bd,39489,1731746332792, seqNum=-1] 2024-11-16T08:38:53,919 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T08:38:53,921 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39370, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T08:38:53,923 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c27dd56784bd,41263,1731746332596 2024-11-16T08:38:53,923 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:38:53,925 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T08:38:53,925 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-16T08:38:53,926 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is c27dd56784bd,41263,1731746332596 2024-11-16T08:38:53,926 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@f9580cf 2024-11-16T08:38:53,926 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-16T08:38:53,927 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42286, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-16T08:38:53,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41263 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-16T08:38:53,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41263 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
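HBaseTestingUtil reports the minicluster is up, the balancer is switched off, and the TableDescriptorChecker warns that the test deliberately uses a tiny hbase.hregion.max.filesize (786432) and memstore flush size (8192) to force frequent rolling and splitting. A minimal sketch of that kind of test scaffolding follows, assuming HBaseTestingUtil's startMiniCluster/shutdownMiniCluster API; it is not the actual TestLogRolling source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    Configuration conf = util.getConfiguration();

    // Deliberately small limits, mirroring the warnings above, to force
    // frequent flushes and region splits during the test run.
    conf.setLong("hbase.hregion.max.filesize", 786432L);
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L);

    util.startMiniCluster(1); // one master plus one region server
    try {
      // Corresponds to the "Minicluster is up" line above.
      System.out.println("max filesize in effect: " + conf.get("hbase.hregion.max.filesize"));
    } finally {
      util.shutdownMiniCluster();
    }
  }
}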
2024-11-16T08:38:53,928 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41263 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T08:38:53,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41263 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-16T08:38:53,930 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-16T08:38:53,931 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:38:53,931 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41263 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-16T08:38:53,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41263 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T08:38:53,932 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-16T08:38:53,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741835_1011 (size=381) 2024-11-16T08:38:53,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741835_1011 (size=381) 2024-11-16T08:38:53,943 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 74ff98a5d3b62c48bc6fc81d8bbb1f03, NAME => 'TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4 2024-11-16T08:38:53,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741836_1012 (size=64) 2024-11-16T08:38:53,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741836_1012 (size=64) 2024-11-16T08:38:53,950 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:38:53,950 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 74ff98a5d3b62c48bc6fc81d8bbb1f03, disabling compactions & flushes 2024-11-16T08:38:53,950 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03. 2024-11-16T08:38:53,950 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03. 2024-11-16T08:38:53,950 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03. after waiting 0 ms 2024-11-16T08:38:53,950 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03. 2024-11-16T08:38:53,950 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03. 2024-11-16T08:38:53,950 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 74ff98a5d3b62c48bc6fc81d8bbb1f03: Waiting for close lock at 1731746333950Disabling compacts and flushes for region at 1731746333950Disabling writes for close at 1731746333950Writing region close event to WAL at 1731746333950Closed at 1731746333950 2024-11-16T08:38:53,952 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-16T08:38:53,952 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731746333952"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731746333952"}]},"ts":"1731746333952"} 2024-11-16T08:38:53,954 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
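After the CREATE_TABLE_ADD_TO_META step above, the new region's regioninfo and state columns are in hbase:meta. A client would not parse those Put records directly; it would normally go through a RegionLocator. A minimal sketch, assuming the standard hbase-client API (the helper class is hypothetical, not part of the test):

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionLocationSketch {
      // Print where each region of the new table is placed, i.e. the same
      // information the master has just written into hbase:meta.
      public static void printLocations(Connection conn) throws Exception {
        try (RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("TestLogRolling-testLogRolling"))) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }

For the single region created here, this would report encoded name 74ff98a5d3b62c48bc6fc81d8bbb1f03 once the assignment below completes.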
2024-11-16T08:38:53,955 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-16T08:38:53,955 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731746333955"}]},"ts":"1731746333955"} 2024-11-16T08:38:53,958 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-16T08:38:53,958 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=74ff98a5d3b62c48bc6fc81d8bbb1f03, ASSIGN}] 2024-11-16T08:38:53,959 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=74ff98a5d3b62c48bc6fc81d8bbb1f03, ASSIGN 2024-11-16T08:38:53,960 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=74ff98a5d3b62c48bc6fc81d8bbb1f03, ASSIGN; state=OFFLINE, location=c27dd56784bd,39489,1731746332792; forceNewPlan=false, retain=false 2024-11-16T08:38:54,111 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=74ff98a5d3b62c48bc6fc81d8bbb1f03, regionState=OPENING, regionLocation=c27dd56784bd,39489,1731746332792 2024-11-16T08:38:54,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=74ff98a5d3b62c48bc6fc81d8bbb1f03, ASSIGN because future has completed 2024-11-16T08:38:54,114 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 74ff98a5d3b62c48bc6fc81d8bbb1f03, server=c27dd56784bd,39489,1731746332792}] 2024-11-16T08:38:54,272 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03. 
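Region assignment (pid=5 and pid=6 above) runs asynchronously on the master, which is why the RPC handler earlier keeps logging "Checking to see if procedure is done pid=4". On the client side the usual pattern is simply to poll table availability until the create-table procedure and its assignment subprocedures finish. A minimal sketch under that assumption (the helper class, method name and timeout handling are hypothetical):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class WaitForTableSketch {
      // Block until every region of the table is open, or give up after timeoutMs.
      public static void waitForTable(Admin admin, String name, long timeoutMs) throws Exception {
        TableName table = TableName.valueOf(name);
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!admin.isTableAvailable(table)) {
          if (System.currentTimeMillis() > deadline) {
            throw new IllegalStateException("Table " + name + " did not come online in time");
          }
          TimeUnit.MILLISECONDS.sleep(100);
        }
      }
    }

In this run the whole chain (create, assign, open) completes in well under a second, as the "Finished pid=4 ... in 381 msec" line below shows.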
2024-11-16T08:38:54,272 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 74ff98a5d3b62c48bc6fc81d8bbb1f03, NAME => 'TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03.', STARTKEY => '', ENDKEY => ''} 2024-11-16T08:38:54,272 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:38:54,272 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:38:54,272 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:38:54,273 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:38:54,284 INFO [StoreOpener-74ff98a5d3b62c48bc6fc81d8bbb1f03-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:38:54,285 INFO [StoreOpener-74ff98a5d3b62c48bc6fc81d8bbb1f03-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 74ff98a5d3b62c48bc6fc81d8bbb1f03 columnFamilyName info 2024-11-16T08:38:54,286 DEBUG [StoreOpener-74ff98a5d3b62c48bc6fc81d8bbb1f03-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:38:54,286 INFO [StoreOpener-74ff98a5d3b62c48bc6fc81d8bbb1f03-1 {}] regionserver.HStore(327): Store=74ff98a5d3b62c48bc6fc81d8bbb1f03/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:38:54,286 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:38:54,287 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:38:54,287 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:38:54,288 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:38:54,288 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:38:54,289 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:38:54,291 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T08:38:54,291 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 74ff98a5d3b62c48bc6fc81d8bbb1f03; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=730739, jitterRate=-0.07081776857376099}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T08:38:54,291 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:38:54,292 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 74ff98a5d3b62c48bc6fc81d8bbb1f03: Running coprocessor pre-open hook at 1731746334273Writing region info on filesystem at 1731746334273Initializing all the Stores at 1731746334274 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746334274Cleaning up temporary data from old regions at 1731746334288 (+14 ms)Running coprocessor post-open hooks at 1731746334291 (+3 ms)Region opened successfully at 1731746334292 (+1 ms) 2024-11-16T08:38:54,293 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03., pid=6, masterSystemTime=1731746334266 2024-11-16T08:38:54,295 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03. 
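From 08:38:54,341 onward a Close-WAL-Writer-0 thread retries lease recovery roughly once per second for two WAL files that appear to be left over from an earlier minicluster in the same JVM (note the different NameNode port, 34591 rather than 33067). Every attempt fails with "Filesystem closed" because the DFSClient behind that filesystem handle is already closed, so the reflective isFileClosed() probe throws instead of answering. The sketch below is not HBase's actual RecoverLeaseFSUtils code, only the same idea expressed against the public HDFS API; the class name is hypothetical and the sleep interval is taken from the log's retry cadence:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLeaseRecoverySketch {
      // Ask the NameNode to recover the WAL's lease and poll until the file is closed.
      // If the FileSystem handle has already been closed, both calls throw
      // "java.io.IOException: Filesystem closed" -- the failure the warnings
      // below keep reporting.
      public static void recoverWalLease(Configuration conf, String walUri) throws Exception {
        Path wal = new Path(walUri);
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(new URI(walUri), conf);
        while (!dfs.recoverLease(wal) && !dfs.isFileClosed(wal)) {
          Thread.sleep(1000L); // the log shows roughly one attempt per second
        }
      }
    }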
2024-11-16T08:38:54,295 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03. 2024-11-16T08:38:54,296 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=74ff98a5d3b62c48bc6fc81d8bbb1f03, regionState=OPEN, openSeqNum=2, regionLocation=c27dd56784bd,39489,1731746332792 2024-11-16T08:38:54,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 74ff98a5d3b62c48bc6fc81d8bbb1f03, server=c27dd56784bd,39489,1731746332792 because future has completed 2024-11-16T08:38:54,301 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-16T08:38:54,302 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 74ff98a5d3b62c48bc6fc81d8bbb1f03, server=c27dd56784bd,39489,1731746332792 in 185 msec 2024-11-16T08:38:54,304 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-16T08:38:54,304 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=74ff98a5d3b62c48bc6fc81d8bbb1f03, ASSIGN in 344 msec 2024-11-16T08:38:54,305 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-16T08:38:54,306 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731746334305"}]},"ts":"1731746334305"} 2024-11-16T08:38:54,308 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-16T08:38:54,309 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-16T08:38:54,311 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 381 msec 2024-11-16T08:38:54,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:54,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:54,792 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:54,792 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:54,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:54,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:54,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:54,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:54,794 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:54,794 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:54,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:54,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:54,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:54,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:54,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:54,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:54,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:54,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:54,819 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:54,821 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:55,326 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T08:38:55,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:55,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:55,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:55,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:55,329 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:55,329 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:55,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:55,331 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:55,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:55,372 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:55,373 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:55,373 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:55,373 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:55,374 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:55,374 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:55,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:55,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:55,382 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:55,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:38:55,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:56,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:56,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:57,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:57,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:58,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:58,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:59,263 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T08:38:59,264 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-16T08:38:59,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:38:59,419 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-16T08:38:59,419 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-16T08:38:59,419 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-16T08:38:59,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:00,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:00,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:01,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:01,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:02,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:02,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:03,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:03,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:39:03,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41263 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-16T08:39:03,983 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-16T08:39:03,983 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-16T08:39:03,986 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-16T08:39:03,986 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03. 2024-11-16T08:39:03,989 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03., hostname=c27dd56784bd,39489,1731746332792, seqNum=2] 2024-11-16T08:39:04,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:39:04,006 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 74ff98a5d3b62c48bc6fc81d8bbb1f03 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T08:39:04,023 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/d2c784fc2d34409aa41bb681e809ae4f is 1080, key is row0001/info:/1731746343991/Put/seqid=0 2024-11-16T08:39:04,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741837_1013 (size=12509) 2024-11-16T08:39:04,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741837_1013 (size=12509) 2024-11-16T08:39:04,029 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/d2c784fc2d34409aa41bb681e809ae4f 2024-11-16T08:39:04,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/d2c784fc2d34409aa41bb681e809ae4f as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/d2c784fc2d34409aa41bb681e809ae4f 2024-11-16T08:39:04,043 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/d2c784fc2d34409aa41bb681e809ae4f, entries=7, sequenceid=11, filesize=12.2 K 
2024-11-16T08:39:04,044 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for 74ff98a5d3b62c48bc6fc81d8bbb1f03 in 38ms, sequenceid=11, compaction requested=false 2024-11-16T08:39:04,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 74ff98a5d3b62c48bc6fc81d8bbb1f03: 2024-11-16T08:39:04,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:39:04,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 74ff98a5d3b62c48bc6fc81d8bbb1f03 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-16T08:39:04,052 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/23544821704b4e8cbce6075a8c50f6b4 is 1080, key is row0008/info:/1731746344007/Put/seqid=0 2024-11-16T08:39:04,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741838_1014 (size=22222) 2024-11-16T08:39:04,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741838_1014 (size=22222) 2024-11-16T08:39:04,064 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=30 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/23544821704b4e8cbce6075a8c50f6b4 2024-11-16T08:39:04,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/23544821704b4e8cbce6075a8c50f6b4 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/23544821704b4e8cbce6075a8c50f6b4 2024-11-16T08:39:04,080 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/23544821704b4e8cbce6075a8c50f6b4, entries=16, sequenceid=30, filesize=21.7 K 2024-11-16T08:39:04,082 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=9.46 KB/9684 for 74ff98a5d3b62c48bc6fc81d8bbb1f03 in 35ms, sequenceid=30, compaction requested=false 2024-11-16T08:39:04,082 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 74ff98a5d3b62c48bc6fc81d8bbb1f03: 2024-11-16T08:39:04,082 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=33.9 K, sizeToCheck=16.0 K 2024-11-16T08:39:04,082 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:39:04,082 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/23544821704b4e8cbce6075a8c50f6b4 because midkey is the same as first or last row 2024-11-16T08:39:04,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:04,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:04,923 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T08:39:04,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:04,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:04,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:04,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:04,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:04,926 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:04,926 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:04,926 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:04,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:04,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:04,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:04,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:04,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:04,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:04,959 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:04,959 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:04,959 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:04,962 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:05,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:05,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:39:06,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:39:06,073 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 74ff98a5d3b62c48bc6fc81d8bbb1f03 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-16T08:39:06,078 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/0eb4160fbfd14609bb7089bd770a62c5 is 1080, key is row0024/info:/1731746344048/Put/seqid=0 2024-11-16T08:39:06,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741839_1015 (size=15740) 2024-11-16T08:39:06,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741839_1015 (size=15740) 2024-11-16T08:39:06,084 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/0eb4160fbfd14609bb7089bd770a62c5 2024-11-16T08:39:06,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/0eb4160fbfd14609bb7089bd770a62c5 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/0eb4160fbfd14609bb7089bd770a62c5 2024-11-16T08:39:06,098 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/0eb4160fbfd14609bb7089bd770a62c5, entries=10, sequenceid=43, filesize=15.4 K 2024-11-16T08:39:06,099 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=11.56 KB/11836 for 74ff98a5d3b62c48bc6fc81d8bbb1f03 in 26ms, sequenceid=43, compaction requested=true 2024-11-16T08:39:06,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 74ff98a5d3b62c48bc6fc81d8bbb1f03: 2024-11-16T08:39:06,099 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=49.3 K, sizeToCheck=16.0 K 2024-11-16T08:39:06,099 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:39:06,099 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/23544821704b4e8cbce6075a8c50f6b4 because midkey is the same as first or last row 2024-11-16T08:39:06,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 74ff98a5d3b62c48bc6fc81d8bbb1f03:info, priority=-2147483648, current under 
compaction store size is 1 2024-11-16T08:39:06,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:06,100 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T08:39:06,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:39:06,101 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 74ff98a5d3b62c48bc6fc81d8bbb1f03 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T08:39:06,101 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 50471 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T08:39:06,101 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1541): 74ff98a5d3b62c48bc6fc81d8bbb1f03/info is initiating minor compaction (all files) 2024-11-16T08:39:06,102 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 74ff98a5d3b62c48bc6fc81d8bbb1f03/info in TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03. 2024-11-16T08:39:06,102 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/d2c784fc2d34409aa41bb681e809ae4f, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/23544821704b4e8cbce6075a8c50f6b4, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/0eb4160fbfd14609bb7089bd770a62c5] into tmpdir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp, totalSize=49.3 K 2024-11-16T08:39:06,102 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting d2c784fc2d34409aa41bb681e809ae4f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731746343991 2024-11-16T08:39:06,103 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting 23544821704b4e8cbce6075a8c50f6b4, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=30, earliestPutTs=1731746344007 2024-11-16T08:39:06,103 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0eb4160fbfd14609bb7089bd770a62c5, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731746344048 2024-11-16T08:39:06,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/40a2a46dcab54554a05687bf3cd09ce6 is 1080, key is 
row0034/info:/1731746346074/Put/seqid=0 2024-11-16T08:39:06,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741840_1016 (size=17894) 2024-11-16T08:39:06,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741840_1016 (size=17894) 2024-11-16T08:39:06,135 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=58 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/40a2a46dcab54554a05687bf3cd09ce6 2024-11-16T08:39:06,152 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 74ff98a5d3b62c48bc6fc81d8bbb1f03#info#compaction#59 average throughput is 16.93 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T08:39:06,152 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/2cf114c6a5564d879768307d87e655b7 is 1080, key is row0001/info:/1731746343991/Put/seqid=0 2024-11-16T08:39:06,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39489 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=74ff98a5d3b62c48bc6fc81d8bbb1f03, server=c27dd56784bd,39489,1731746332792 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-16T08:39:06,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39489 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:39370 deadline: 1731746356156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=74ff98a5d3b62c48bc6fc81d8bbb1f03, server=c27dd56784bd,39489,1731746332792 2024-11-16T08:39:06,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/40a2a46dcab54554a05687bf3cd09ce6 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/40a2a46dcab54554a05687bf3cd09ce6 2024-11-16T08:39:06,164 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03., hostname=c27dd56784bd,39489,1731746332792, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03., hostname=c27dd56784bd,39489,1731746332792, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=74ff98a5d3b62c48bc6fc81d8bbb1f03, server=c27dd56784bd,39489,1731746332792 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T08:39:06,165 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03., hostname=c27dd56784bd,39489,1731746332792, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=74ff98a5d3b62c48bc6fc81d8bbb1f03, server=c27dd56784bd,39489,1731746332792 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T08:39:06,165 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03., hostname=c27dd56784bd,39489,1731746332792, seqNum=2 because the exception is null or not the one we care about 2024-11-16T08:39:06,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741841_1017 (size=40670) 2024-11-16T08:39:06,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741841_1017 (size=40670) 2024-11-16T08:39:06,170 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/40a2a46dcab54554a05687bf3cd09ce6, entries=12, sequenceid=58, filesize=17.5 K 2024-11-16T08:39:06,171 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=17.86 KB/18292 for 74ff98a5d3b62c48bc6fc81d8bbb1f03 in 70ms, sequenceid=58, compaction requested=false 2024-11-16T08:39:06,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 74ff98a5d3b62c48bc6fc81d8bbb1f03: 2024-11-16T08:39:06,171 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.8 K, sizeToCheck=16.0 K 2024-11-16T08:39:06,171 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:39:06,171 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/23544821704b4e8cbce6075a8c50f6b4 because midkey is the same as first or last row 2024-11-16T08:39:06,177 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/2cf114c6a5564d879768307d87e655b7 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/2cf114c6a5564d879768307d87e655b7 2024-11-16T08:39:06,186 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] 
regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 74ff98a5d3b62c48bc6fc81d8bbb1f03/info of 74ff98a5d3b62c48bc6fc81d8bbb1f03 into 2cf114c6a5564d879768307d87e655b7(size=39.7 K), total size for store is 57.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T08:39:06,186 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 74ff98a5d3b62c48bc6fc81d8bbb1f03: 2024-11-16T08:39:06,186 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03., storeName=74ff98a5d3b62c48bc6fc81d8bbb1f03/info, priority=13, startTime=1731746346099; duration=0sec 2024-11-16T08:39:06,186 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=57.2 K, sizeToCheck=16.0 K 2024-11-16T08:39:06,187 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:39:06,187 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/2cf114c6a5564d879768307d87e655b7 because midkey is the same as first or last row 2024-11-16T08:39:06,187 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=57.2 K, sizeToCheck=16.0 K 2024-11-16T08:39:06,187 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:39:06,187 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/2cf114c6a5564d879768307d87e655b7 because midkey is the same as first or last row 2024-11-16T08:39:06,187 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=57.2 K, sizeToCheck=16.0 K 2024-11-16T08:39:06,187 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-16T08:39:06,187 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/2cf114c6a5564d879768307d87e655b7 because midkey is the same as first or last row 2024-11-16T08:39:06,187 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:06,187 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 74ff98a5d3b62c48bc6fc81d8bbb1f03:info 2024-11-16T08:39:06,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:06,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:07,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-11-16T08:39:07,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-16T08:39:16,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 74ff98a5d3b62c48bc6fc81d8bbb1f03
2024-11-16T08:39:16,214 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 74ff98a5d3b62c48bc6fc81d8bbb1f03 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB
2024-11-16T08:39:16,222 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/eb9d7b3084ae4c8c8661b50fd1aa6fbe is 1080, key is row0046/info:/1731746346102/Put/seqid=0
2024-11-16T08:39:16,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741842_1018 (size=24376)
2024-11-16T08:39:16,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741842_1018 (size=24376)
2024-11-16T08:39:16,231 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/eb9d7b3084ae4c8c8661b50fd1aa6fbe
2024-11-16T08:39:16,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/eb9d7b3084ae4c8c8661b50fd1aa6fbe as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/eb9d7b3084ae4c8c8661b50fd1aa6fbe
2024-11-16T08:39:16,255 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/eb9d7b3084ae4c8c8661b50fd1aa6fbe, entries=18, sequenceid=80, filesize=23.8 K
2024-11-16T08:39:16,256 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=1.05 KB/1076 for 74ff98a5d3b62c48bc6fc81d8bbb1f03 in 42ms, sequenceid=80, compaction requested=true
2024-11-16T08:39:16,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 74ff98a5d3b62c48bc6fc81d8bbb1f03:
2024-11-16T08:39:16,256 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=81.0 K, sizeToCheck=16.0 K
2024-11-16T08:39:16,256 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-16T08:39:16,256 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/2cf114c6a5564d879768307d87e655b7 because midkey is the same as first or last row
2024-11-16T08:39:16,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 74ff98a5d3b62c48bc6fc81d8bbb1f03:info, priority=-2147483648, current under compaction store size is 1
2024-11-16T08:39:16,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-16T08:39:16,257 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-16T08:39:16,258 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 82940 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-16T08:39:16,258 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1541): 74ff98a5d3b62c48bc6fc81d8bbb1f03/info is initiating minor compaction (all files)
2024-11-16T08:39:16,258 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 74ff98a5d3b62c48bc6fc81d8bbb1f03/info in TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03.
2024-11-16T08:39:16,258 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/2cf114c6a5564d879768307d87e655b7, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/40a2a46dcab54554a05687bf3cd09ce6, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/eb9d7b3084ae4c8c8661b50fd1aa6fbe] into tmpdir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp, totalSize=81.0 K
2024-11-16T08:39:16,259 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2cf114c6a5564d879768307d87e655b7, keycount=33, bloomtype=ROW, size=39.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731746343991
2024-11-16T08:39:16,259 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting 40a2a46dcab54554a05687bf3cd09ce6, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1731746346074
2024-11-16T08:39:16,260 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting eb9d7b3084ae4c8c8661b50fd1aa6fbe, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1731746346102
2024-11-16T08:39:16,284 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 74ff98a5d3b62c48bc6fc81d8bbb1f03#info#compaction#61 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-16T08:39:16,285 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/c713c3d933024f2d9d14f25afb30bd44 is 1080, key is row0001/info:/1731746343991/Put/seqid=0
2024-11-16T08:39:16,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741843_1019 (size=73224)
2024-11-16T08:39:16,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741843_1019 (size=73224)
2024-11-16T08:39:16,336 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/c713c3d933024f2d9d14f25afb30bd44 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/c713c3d933024f2d9d14f25afb30bd44
2024-11-16T08:39:16,354 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 74ff98a5d3b62c48bc6fc81d8bbb1f03/info of 74ff98a5d3b62c48bc6fc81d8bbb1f03 into c713c3d933024f2d9d14f25afb30bd44(size=71.5 K), total size for store is 71.5 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-16T08:39:16,355 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 74ff98a5d3b62c48bc6fc81d8bbb1f03:
2024-11-16T08:39:16,355 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03., storeName=74ff98a5d3b62c48bc6fc81d8bbb1f03/info, priority=13, startTime=1731746356256; duration=0sec
2024-11-16T08:39:16,355 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K
2024-11-16T08:39:16,355 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-16T08:39:16,355 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K
2024-11-16T08:39:16,355 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-16T08:39:16,355 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K
2024-11-16T08:39:16,355 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-16T08:39:16,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-16T08:39:16,364 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-16T08:39:16,364 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-16T08:39:16,364 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 74ff98a5d3b62c48bc6fc81d8bbb1f03:info
2024-11-16T08:39:16,369 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41263 {}] assignment.AssignmentManager(1355): Split request from c27dd56784bd,39489,1731746332792, parent={ENCODED => 74ff98a5d3b62c48bc6fc81d8bbb1f03, NAME => 'TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062
2024-11-16T08:39:16,377 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41263 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=c27dd56784bd,39489,1731746332792
2024-11-16T08:39:16,388 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41263 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=74ff98a5d3b62c48bc6fc81d8bbb1f03, daughterA=f567595c518fa426d00b5514ba689075, daughterB=41fb973b9df933ef140b595dbdc61fe8
2024-11-16T08:39:16,390 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=74ff98a5d3b62c48bc6fc81d8bbb1f03, daughterA=f567595c518fa426d00b5514ba689075, daughterB=41fb973b9df933ef140b595dbdc61fe8
2024-11-16T08:39:16,390 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=74ff98a5d3b62c48bc6fc81d8bbb1f03, daughterA=f567595c518fa426d00b5514ba689075, daughterB=41fb973b9df933ef140b595dbdc61fe8
2024-11-16T08:39:16,390 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=74ff98a5d3b62c48bc6fc81d8bbb1f03, daughterA=f567595c518fa426d00b5514ba689075, daughterB=41fb973b9df933ef140b595dbdc61fe8
2024-11-16T08:39:16,410 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=74ff98a5d3b62c48bc6fc81d8bbb1f03, UNASSIGN}]
2024-11-16T08:39:16,412 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=74ff98a5d3b62c48bc6fc81d8bbb1f03, UNASSIGN
2024-11-16T08:39:16,414 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=74ff98a5d3b62c48bc6fc81d8bbb1f03, regionState=CLOSING, regionLocation=c27dd56784bd,39489,1731746332792
2024-11-16T08:39:16,421 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=74ff98a5d3b62c48bc6fc81d8bbb1f03, UNASSIGN
2024-11-16T08:39:16,421 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false
2024-11-16T08:39:16,421 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 74ff98a5d3b62c48bc6fc81d8bbb1f03, server=c27dd56784bd,39489,1731746332792}]
2024-11-16T08:39:16,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-16T08:39:16,581 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 74ff98a5d3b62c48bc6fc81d8bbb1f03
2024-11-16T08:39:16,581 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true
2024-11-16T08:39:16,582 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 74ff98a5d3b62c48bc6fc81d8bbb1f03, disabling compactions & flushes
2024-11-16T08:39:16,582 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03.
2024-11-16T08:39:16,582 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03.
2024-11-16T08:39:16,583 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03. after waiting 0 ms
2024-11-16T08:39:16,583 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03.
2024-11-16T08:39:16,583 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 74ff98a5d3b62c48bc6fc81d8bbb1f03 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-16T08:39:16,588 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/b2ebf9083bcb4cb28b9c9c89572ab81c is 1080, key is row0064/info:/1731746356217/Put/seqid=0
2024-11-16T08:39:16,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741844_1020 (size=6033)
2024-11-16T08:39:16,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741844_1020 (size=6033)
2024-11-16T08:39:16,593 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=85 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/b2ebf9083bcb4cb28b9c9c89572ab81c
2024-11-16T08:39:16,599 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/.tmp/info/b2ebf9083bcb4cb28b9c9c89572ab81c as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/b2ebf9083bcb4cb28b9c9c89572ab81c
hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/b2ebf9083bcb4cb28b9c9c89572ab81c 2024-11-16T08:39:16,606 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/b2ebf9083bcb4cb28b9c9c89572ab81c, entries=1, sequenceid=85, filesize=5.9 K 2024-11-16T08:39:16,610 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 74ff98a5d3b62c48bc6fc81d8bbb1f03 in 27ms, sequenceid=85, compaction requested=false 2024-11-16T08:39:16,611 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/d2c784fc2d34409aa41bb681e809ae4f, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/23544821704b4e8cbce6075a8c50f6b4, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/2cf114c6a5564d879768307d87e655b7, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/0eb4160fbfd14609bb7089bd770a62c5, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/40a2a46dcab54554a05687bf3cd09ce6, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/eb9d7b3084ae4c8c8661b50fd1aa6fbe] to archive 2024-11-16T08:39:16,612 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-16T08:39:16,614 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/d2c784fc2d34409aa41bb681e809ae4f to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/d2c784fc2d34409aa41bb681e809ae4f 2024-11-16T08:39:16,616 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/23544821704b4e8cbce6075a8c50f6b4 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/23544821704b4e8cbce6075a8c50f6b4 2024-11-16T08:39:16,617 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/2cf114c6a5564d879768307d87e655b7 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/2cf114c6a5564d879768307d87e655b7 2024-11-16T08:39:16,618 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/0eb4160fbfd14609bb7089bd770a62c5 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/0eb4160fbfd14609bb7089bd770a62c5 2024-11-16T08:39:16,620 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/40a2a46dcab54554a05687bf3cd09ce6 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/40a2a46dcab54554a05687bf3cd09ce6 2024-11-16T08:39:16,621 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/eb9d7b3084ae4c8c8661b50fd1aa6fbe to 
hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/eb9d7b3084ae4c8c8661b50fd1aa6fbe 2024-11-16T08:39:16,632 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=1 2024-11-16T08:39:16,633 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03. 2024-11-16T08:39:16,633 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 74ff98a5d3b62c48bc6fc81d8bbb1f03: Waiting for close lock at 1731746356582Running coprocessor pre-close hooks at 1731746356582Disabling compacts and flushes for region at 1731746356582Disabling writes for close at 1731746356583 (+1 ms)Obtaining lock to block concurrent updates at 1731746356583Preparing flush snapshotting stores in 74ff98a5d3b62c48bc6fc81d8bbb1f03 at 1731746356583Finished memstore snapshotting TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731746356583Flushing stores of TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03. at 1731746356584 (+1 ms)Flushing 74ff98a5d3b62c48bc6fc81d8bbb1f03/info: creating writer at 1731746356584Flushing 74ff98a5d3b62c48bc6fc81d8bbb1f03/info: appending metadata at 1731746356587 (+3 ms)Flushing 74ff98a5d3b62c48bc6fc81d8bbb1f03/info: closing flushed file at 1731746356587Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@64e7e78d: reopening flushed file at 1731746356598 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 74ff98a5d3b62c48bc6fc81d8bbb1f03 in 27ms, sequenceid=85, compaction requested=false at 1731746356610 (+12 ms)Writing region close event to WAL at 1731746356628 (+18 ms)Running coprocessor post-close hooks at 1731746356633 (+5 ms)Closed at 1731746356633 2024-11-16T08:39:16,636 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:39:16,636 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=74ff98a5d3b62c48bc6fc81d8bbb1f03, regionState=CLOSED 2024-11-16T08:39:16,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 74ff98a5d3b62c48bc6fc81d8bbb1f03, server=c27dd56784bd,39489,1731746332792 because future has completed 2024-11-16T08:39:16,644 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-16T08:39:16,644 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 74ff98a5d3b62c48bc6fc81d8bbb1f03, server=c27dd56784bd,39489,1731746332792 in 221 msec 2024-11-16T08:39:16,648 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-16T08:39:16,648 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=74ff98a5d3b62c48bc6fc81d8bbb1f03, UNASSIGN in 234 msec 2024-11-16T08:39:16,679 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:39:16,683 INFO [PEWorker-2 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 2 storefiles, region=74ff98a5d3b62c48bc6fc81d8bbb1f03, threads=2 2024-11-16T08:39:16,685 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/c713c3d933024f2d9d14f25afb30bd44 for region: 74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:39:16,686 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/b2ebf9083bcb4cb28b9c9c89572ab81c for region: 74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:39:16,698 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/b2ebf9083bcb4cb28b9c9c89572ab81c, top=true 2024-11-16T08:39:16,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741845_1021 (size=27) 2024-11-16T08:39:16,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741845_1021 (size=27) 2024-11-16T08:39:16,707 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/TestLogRolling-testLogRolling=74ff98a5d3b62c48bc6fc81d8bbb1f03-b2ebf9083bcb4cb28b9c9c89572ab81c for child: 41fb973b9df933ef140b595dbdc61fe8, parent: 74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:39:16,707 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/b2ebf9083bcb4cb28b9c9c89572ab81c for region: 74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:39:16,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741846_1022 (size=27) 2024-11-16T08:39:16,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741846_1022 (size=27) 2024-11-16T08:39:16,718 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/c713c3d933024f2d9d14f25afb30bd44 for region: 74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:39:16,720 DEBUG [PEWorker-2 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 74ff98a5d3b62c48bc6fc81d8bbb1f03 Daughter A: [hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/f567595c518fa426d00b5514ba689075/info/c713c3d933024f2d9d14f25afb30bd44.74ff98a5d3b62c48bc6fc81d8bbb1f03] storefiles, Daughter B: [hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/TestLogRolling-testLogRolling=74ff98a5d3b62c48bc6fc81d8bbb1f03-b2ebf9083bcb4cb28b9c9c89572ab81c, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/c713c3d933024f2d9d14f25afb30bd44.74ff98a5d3b62c48bc6fc81d8bbb1f03] storefiles. 2024-11-16T08:39:16,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741847_1023 (size=71) 2024-11-16T08:39:16,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741847_1023 (size=71) 2024-11-16T08:39:16,733 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:39:16,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741848_1024 (size=71) 2024-11-16T08:39:16,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741848_1024 (size=71) 2024-11-16T08:39:16,746 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:39:16,755 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/f567595c518fa426d00b5514ba689075/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=-1 2024-11-16T08:39:16,758 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=-1 2024-11-16T08:39:16,760 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731746356760"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731746356760"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731746356760"}]},"ts":"1731746356760"} 2024-11-16T08:39:16,760 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731746356760"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731746356760"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731746356760"}]},"ts":"1731746356760"} 2024-11-16T08:39:16,761 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731746356760"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731746356760"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731746356760"}]},"ts":"1731746356760"} 2024-11-16T08:39:16,780 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f567595c518fa426d00b5514ba689075, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=41fb973b9df933ef140b595dbdc61fe8, ASSIGN}] 2024-11-16T08:39:16,782 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=41fb973b9df933ef140b595dbdc61fe8, ASSIGN 2024-11-16T08:39:16,782 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f567595c518fa426d00b5514ba689075, ASSIGN 2024-11-16T08:39:16,783 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=41fb973b9df933ef140b595dbdc61fe8, ASSIGN; state=SPLITTING_NEW, location=c27dd56784bd,39489,1731746332792; forceNewPlan=false, retain=false 2024-11-16T08:39:16,783 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f567595c518fa426d00b5514ba689075, ASSIGN; state=SPLITTING_NEW, location=c27dd56784bd,39489,1731746332792; forceNewPlan=false, retain=false 2024-11-16T08:39:16,933 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=f567595c518fa426d00b5514ba689075, regionState=OPENING, regionLocation=c27dd56784bd,39489,1731746332792 2024-11-16T08:39:16,933 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=41fb973b9df933ef140b595dbdc61fe8, regionState=OPENING, regionLocation=c27dd56784bd,39489,1731746332792 2024-11-16T08:39:16,936 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f567595c518fa426d00b5514ba689075, ASSIGN because future has 
completed 2024-11-16T08:39:16,937 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure f567595c518fa426d00b5514ba689075, server=c27dd56784bd,39489,1731746332792}] 2024-11-16T08:39:16,938 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=41fb973b9df933ef140b595dbdc61fe8, ASSIGN because future has completed 2024-11-16T08:39:16,939 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 41fb973b9df933ef140b595dbdc61fe8, server=c27dd56784bd,39489,1731746332792}] 2024-11-16T08:39:17,095 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075. 2024-11-16T08:39:17,095 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => f567595c518fa426d00b5514ba689075, NAME => 'TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-16T08:39:17,096 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling f567595c518fa426d00b5514ba689075 2024-11-16T08:39:17,096 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:39:17,096 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for f567595c518fa426d00b5514ba689075 2024-11-16T08:39:17,096 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for f567595c518fa426d00b5514ba689075 2024-11-16T08:39:17,097 INFO [StoreOpener-f567595c518fa426d00b5514ba689075-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f567595c518fa426d00b5514ba689075 2024-11-16T08:39:17,098 INFO [StoreOpener-f567595c518fa426d00b5514ba689075-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f567595c518fa426d00b5514ba689075 columnFamilyName info 2024-11-16T08:39:17,098 DEBUG [StoreOpener-f567595c518fa426d00b5514ba689075-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:39:17,111 DEBUG [StoreOpener-f567595c518fa426d00b5514ba689075-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/f567595c518fa426d00b5514ba689075/info/c713c3d933024f2d9d14f25afb30bd44.74ff98a5d3b62c48bc6fc81d8bbb1f03->hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/c713c3d933024f2d9d14f25afb30bd44-bottom 2024-11-16T08:39:17,111 INFO [StoreOpener-f567595c518fa426d00b5514ba689075-1 {}] regionserver.HStore(327): Store=f567595c518fa426d00b5514ba689075/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:39:17,111 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for f567595c518fa426d00b5514ba689075 2024-11-16T08:39:17,112 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/f567595c518fa426d00b5514ba689075 2024-11-16T08:39:17,114 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/f567595c518fa426d00b5514ba689075 2024-11-16T08:39:17,114 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for f567595c518fa426d00b5514ba689075 2024-11-16T08:39:17,114 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for f567595c518fa426d00b5514ba689075 2024-11-16T08:39:17,116 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for f567595c518fa426d00b5514ba689075 2024-11-16T08:39:17,117 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened f567595c518fa426d00b5514ba689075; next sequenceid=89; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=782964, jitterRate=-0.0044104307889938354}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T08:39:17,117 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f567595c518fa426d00b5514ba689075 2024-11-16T08:39:17,118 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 
f567595c518fa426d00b5514ba689075: Running coprocessor pre-open hook at 1731746357096Writing region info on filesystem at 1731746357096Initializing all the Stores at 1731746357097 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746357097Cleaning up temporary data from old regions at 1731746357114 (+17 ms)Running coprocessor post-open hooks at 1731746357117 (+3 ms)Region opened successfully at 1731746357118 (+1 ms) 2024-11-16T08:39:17,119 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075., pid=12, masterSystemTime=1731746357091 2024-11-16T08:39:17,119 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store f567595c518fa426d00b5514ba689075:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T08:39:17,119 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:17,119 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-16T08:39:17,120 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075. 2024-11-16T08:39:17,120 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1541): f567595c518fa426d00b5514ba689075/info is initiating minor compaction (all files) 2024-11-16T08:39:17,120 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f567595c518fa426d00b5514ba689075/info in TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075. 
2024-11-16T08:39:17,121 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/f567595c518fa426d00b5514ba689075/info/c713c3d933024f2d9d14f25afb30bd44.74ff98a5d3b62c48bc6fc81d8bbb1f03->hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/c713c3d933024f2d9d14f25afb30bd44-bottom] into tmpdir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/f567595c518fa426d00b5514ba689075/.tmp, totalSize=71.5 K 2024-11-16T08:39:17,121 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting c713c3d933024f2d9d14f25afb30bd44.74ff98a5d3b62c48bc6fc81d8bbb1f03, keycount=31, bloomtype=ROW, size=71.5 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1731746343991 2024-11-16T08:39:17,122 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075. 2024-11-16T08:39:17,122 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075. 2024-11-16T08:39:17,122 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8. 
2024-11-16T08:39:17,122 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 41fb973b9df933ef140b595dbdc61fe8, NAME => 'TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-16T08:39:17,123 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:17,123 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:39:17,123 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=f567595c518fa426d00b5514ba689075, regionState=OPEN, openSeqNum=89, regionLocation=c27dd56784bd,39489,1731746332792 2024-11-16T08:39:17,123 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:17,123 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:17,125 INFO [StoreOpener-41fb973b9df933ef140b595dbdc61fe8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:17,126 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-16T08:39:17,126 INFO [StoreOpener-41fb973b9df933ef140b595dbdc61fe8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 41fb973b9df933ef140b595dbdc61fe8 columnFamilyName info 2024-11-16T08:39:17,126 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure f567595c518fa426d00b5514ba689075, server=c27dd56784bd,39489,1731746332792 because future has completed 2024-11-16T08:39:17,126 DEBUG [StoreOpener-41fb973b9df933ef140b595dbdc61fe8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:39:17,128 DEBUG [MemStoreFlusher.0 {}] 
regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-11-16T08:39:17,128 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-16T08:39:17,132 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-16T08:39:17,132 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure f567595c518fa426d00b5514ba689075, server=c27dd56784bd,39489,1731746332792 in 191 msec 2024-11-16T08:39:17,134 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=f567595c518fa426d00b5514ba689075, ASSIGN in 352 msec 2024-11-16T08:39:17,139 DEBUG [StoreOpener-41fb973b9df933ef140b595dbdc61fe8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/TestLogRolling-testLogRolling=74ff98a5d3b62c48bc6fc81d8bbb1f03-b2ebf9083bcb4cb28b9c9c89572ab81c 2024-11-16T08:39:17,144 DEBUG [StoreOpener-41fb973b9df933ef140b595dbdc61fe8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/c713c3d933024f2d9d14f25afb30bd44.74ff98a5d3b62c48bc6fc81d8bbb1f03->hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/c713c3d933024f2d9d14f25afb30bd44-top 2024-11-16T08:39:17,144 INFO [StoreOpener-41fb973b9df933ef140b595dbdc61fe8-1 {}] regionserver.HStore(327): Store=41fb973b9df933ef140b595dbdc61fe8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:39:17,144 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:17,145 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:17,146 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:17,146 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f567595c518fa426d00b5514ba689075#info#compaction#63 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T08:39:17,147 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:17,147 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:17,147 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/f567595c518fa426d00b5514ba689075/.tmp/info/4b4bae40d2c144ed925c5f270a8fe912 is 1080, key is row0001/info:/1731746343991/Put/seqid=0 2024-11-16T08:39:17,148 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:17,150 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/.tmp/info/7d68adb9dd804bc0b31197373c0164cf is 193, key is TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8./info:regioninfo/1731746356933/Put/seqid=0 2024-11-16T08:39:17,150 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 41fb973b9df933ef140b595dbdc61fe8; next sequenceid=89; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=743606, jitterRate=-0.054456114768981934}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-16T08:39:17,150 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:17,150 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 41fb973b9df933ef140b595dbdc61fe8: Running coprocessor pre-open hook at 1731746357123Writing region info on filesystem at 1731746357123Initializing all the Stores at 1731746357124 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746357124Cleaning up temporary data from old regions at 1731746357147 (+23 ms)Running coprocessor post-open hooks at 1731746357150 (+3 ms)Region opened successfully at 1731746357150 2024-11-16T08:39:17,151 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8., pid=13, masterSystemTime=1731746357091 2024-11-16T08:39:17,151 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 41fb973b9df933ef140b595dbdc61fe8:info, priority=-2147483648, current under 
compaction store size is 2 2024-11-16T08:39:17,151 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-16T08:39:17,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741849_1025 (size=70862) 2024-11-16T08:39:17,151 DEBUG [RS:0;c27dd56784bd:39489-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-16T08:39:17,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741849_1025 (size=70862) 2024-11-16T08:39:17,153 INFO [RS:0;c27dd56784bd:39489-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8. 2024-11-16T08:39:17,153 DEBUG [RS:0;c27dd56784bd:39489-longCompactions-0 {}] regionserver.HStore(1541): 41fb973b9df933ef140b595dbdc61fe8/info is initiating minor compaction (all files) 2024-11-16T08:39:17,153 INFO [RS:0;c27dd56784bd:39489-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 41fb973b9df933ef140b595dbdc61fe8/info in TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8. 2024-11-16T08:39:17,153 INFO [RS:0;c27dd56784bd:39489-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/c713c3d933024f2d9d14f25afb30bd44.74ff98a5d3b62c48bc6fc81d8bbb1f03->hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/c713c3d933024f2d9d14f25afb30bd44-top, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/TestLogRolling-testLogRolling=74ff98a5d3b62c48bc6fc81d8bbb1f03-b2ebf9083bcb4cb28b9c9c89572ab81c] into tmpdir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp, totalSize=77.4 K 2024-11-16T08:39:17,154 DEBUG [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8. 2024-11-16T08:39:17,154 INFO [RS_OPEN_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8. 
2024-11-16T08:39:17,155 DEBUG [RS:0;c27dd56784bd:39489-longCompactions-0 {}] compactions.Compactor(225): Compacting c713c3d933024f2d9d14f25afb30bd44.74ff98a5d3b62c48bc6fc81d8bbb1f03, keycount=31, bloomtype=ROW, size=71.5 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1731746343991 2024-11-16T08:39:17,155 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=41fb973b9df933ef140b595dbdc61fe8, regionState=OPEN, openSeqNum=89, regionLocation=c27dd56784bd,39489,1731746332792 2024-11-16T08:39:17,155 DEBUG [RS:0;c27dd56784bd:39489-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=74ff98a5d3b62c48bc6fc81d8bbb1f03-b2ebf9083bcb4cb28b9c9c89572ab81c, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1731746356217 2024-11-16T08:39:17,159 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 41fb973b9df933ef140b595dbdc61fe8, server=c27dd56784bd,39489,1731746332792 because future has completed 2024-11-16T08:39:17,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741850_1026 (size=9847) 2024-11-16T08:39:17,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741850_1026 (size=9847) 2024-11-16T08:39:17,161 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/.tmp/info/7d68adb9dd804bc0b31197373c0164cf 2024-11-16T08:39:17,168 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/f567595c518fa426d00b5514ba689075/.tmp/info/4b4bae40d2c144ed925c5f270a8fe912 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/f567595c518fa426d00b5514ba689075/info/4b4bae40d2c144ed925c5f270a8fe912 2024-11-16T08:39:17,168 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-16T08:39:17,169 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 41fb973b9df933ef140b595dbdc61fe8, server=c27dd56784bd,39489,1731746332792 in 224 msec 2024-11-16T08:39:17,175 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-16T08:39:17,176 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=41fb973b9df933ef140b595dbdc61fe8, ASSIGN in 389 msec 2024-11-16T08:39:17,177 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=74ff98a5d3b62c48bc6fc81d8bbb1f03, daughterA=f567595c518fa426d00b5514ba689075, daughterB=41fb973b9df933ef140b595dbdc61fe8 in 796 msec 2024-11-16T08:39:17,177 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] 
regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in f567595c518fa426d00b5514ba689075/info of f567595c518fa426d00b5514ba689075 into 4b4bae40d2c144ed925c5f270a8fe912(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T08:39:17,177 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f567595c518fa426d00b5514ba689075: 2024-11-16T08:39:17,177 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075., storeName=f567595c518fa426d00b5514ba689075/info, priority=15, startTime=1731746357119; duration=0sec 2024-11-16T08:39:17,177 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:17,177 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f567595c518fa426d00b5514ba689075:info 2024-11-16T08:39:17,185 INFO [RS:0;c27dd56784bd:39489-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 41fb973b9df933ef140b595dbdc61fe8#info#compaction#65 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T08:39:17,186 DEBUG [RS:0;c27dd56784bd:39489-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/fd7150450bbc47489ad1d46ad67ad842 is 1080, key is row0062/info:/1731746346151/Put/seqid=0 2024-11-16T08:39:17,194 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/.tmp/ns/95a86a7fea3d45f3b5a80a0c3a8c1966 is 43, key is default/ns:d/1731746333851/Put/seqid=0 2024-11-16T08:39:17,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741851_1027 (size=8359) 2024-11-16T08:39:17,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741851_1027 (size=8359) 2024-11-16T08:39:17,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741852_1028 (size=5153) 2024-11-16T08:39:17,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741852_1028 (size=5153) 2024-11-16T08:39:17,211 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/.tmp/ns/95a86a7fea3d45f3b5a80a0c3a8c1966 2024-11-16T08:39:17,214 DEBUG [RS:0;c27dd56784bd:39489-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/fd7150450bbc47489ad1d46ad67ad842 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/fd7150450bbc47489ad1d46ad67ad842 2024-11-16T08:39:17,223 INFO [RS:0;c27dd56784bd:39489-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 2 (all) file(s) in 41fb973b9df933ef140b595dbdc61fe8/info of 41fb973b9df933ef140b595dbdc61fe8 into fd7150450bbc47489ad1d46ad67ad842(size=8.2 K), total size for store is 8.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T08:39:17,224 DEBUG [RS:0;c27dd56784bd:39489-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:17,224 INFO [RS:0;c27dd56784bd:39489-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8., storeName=41fb973b9df933ef140b595dbdc61fe8/info, priority=14, startTime=1731746357151; duration=0sec 2024-11-16T08:39:17,224 DEBUG [RS:0;c27dd56784bd:39489-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:17,224 DEBUG [RS:0;c27dd56784bd:39489-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 41fb973b9df933ef140b595dbdc61fe8:info 2024-11-16T08:39:17,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/.tmp/table/c53af485a74a409ea8a40250bbff0e8c is 65, key is TestLogRolling-testLogRolling/table:state/1731746334305/Put/seqid=0 2024-11-16T08:39:17,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741853_1029 (size=5340) 2024-11-16T08:39:17,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741853_1029 (size=5340) 2024-11-16T08:39:17,239 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/.tmp/table/c53af485a74a409ea8a40250bbff0e8c 2024-11-16T08:39:17,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/.tmp/info/7d68adb9dd804bc0b31197373c0164cf as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/info/7d68adb9dd804bc0b31197373c0164cf 2024-11-16T08:39:17,249 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/info/7d68adb9dd804bc0b31197373c0164cf, entries=30, sequenceid=17, filesize=9.6 K 2024-11-16T08:39:17,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/.tmp/ns/95a86a7fea3d45f3b5a80a0c3a8c1966 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/ns/95a86a7fea3d45f3b5a80a0c3a8c1966 2024-11-16T08:39:17,256 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/ns/95a86a7fea3d45f3b5a80a0c3a8c1966, entries=2, sequenceid=17, filesize=5.0 K 2024-11-16T08:39:17,257 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/.tmp/table/c53af485a74a409ea8a40250bbff0e8c as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/table/c53af485a74a409ea8a40250bbff0e8c 2024-11-16T08:39:17,262 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/table/c53af485a74a409ea8a40250bbff0e8c, entries=2, sequenceid=17, filesize=5.2 K 2024-11-16T08:39:17,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 135ms, sequenceid=17, compaction requested=false 2024-11-16T08:39:17,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-16T08:39:17,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:17,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:18,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39489 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:39370 deadline: 1731746368219, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03. 
is not online on c27dd56784bd,39489,1731746332792 2024-11-16T08:39:18,220 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03., hostname=c27dd56784bd,39489,1731746332792, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03., hostname=c27dd56784bd,39489,1731746332792, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03. is not online on c27dd56784bd,39489,1731746332792 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T08:39:18,220 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03., hostname=c27dd56784bd,39489,1731746332792, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03. is not online on c27dd56784bd,39489,1731746332792 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T08:39:18,220 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731746333928.74ff98a5d3b62c48bc6fc81d8bbb1f03., hostname=c27dd56784bd,39489,1731746332792, seqNum=2 from cache 2024-11-16T08:39:18,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:18,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:19,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:19,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:20,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:20,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:21,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:21,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:21,633 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:21,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:21,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:21,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:21,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:21,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:21,635 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:21,635 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:21,666 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:21,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:21,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:21,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:21,667 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:21,668 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:21,671 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:21,671 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:21,671 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:21,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:22,182 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-16T08:39:22,182 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:22,183 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:22,183 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:22,183 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:22,183 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:22,183 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:22,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:22,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:22,206 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:22,207 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:22,207 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:22,207 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:22,207 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:22,208 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:22,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:22,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:22,213 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:22,215 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:22,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:39:22,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:22,577 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-16T08:39:23,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:23,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:24,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:24,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:25,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:25,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:26,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:26,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:27,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:27,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:28,259 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0065', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8., hostname=c27dd56784bd,39489,1731746332792, seqNum=89] 2024-11-16T08:39:28,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:28,273 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 41fb973b9df933ef140b595dbdc61fe8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T08:39:28,277 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/0404fa91bcbe4c4f86e140161d1d2d51 is 1080, key is row0065/info:/1731746368260/Put/seqid=0 2024-11-16T08:39:28,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741854_1030 (size=12509) 2024-11-16T08:39:28,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741854_1030 (size=12509) 2024-11-16T08:39:28,289 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/0404fa91bcbe4c4f86e140161d1d2d51 2024-11-16T08:39:28,296 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/0404fa91bcbe4c4f86e140161d1d2d51 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/0404fa91bcbe4c4f86e140161d1d2d51 2024-11-16T08:39:28,303 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/0404fa91bcbe4c4f86e140161d1d2d51, entries=7, sequenceid=99, filesize=12.2 K 2024-11-16T08:39:28,304 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 41fb973b9df933ef140b595dbdc61fe8 in 32ms, sequenceid=99, compaction requested=false 2024-11-16T08:39:28,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:28,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:28,305 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 41fb973b9df933ef140b595dbdc61fe8 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-16T08:39:28,311 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/9cd7bde69cf64852bcfa4a4a1b457162 is 1080, key is row0072/info:/1731746368274/Put/seqid=0 2024-11-16T08:39:28,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741855_1031 (size=20064) 2024-11-16T08:39:28,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741855_1031 (size=20064) 2024-11-16T08:39:28,342 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/9cd7bde69cf64852bcfa4a4a1b457162 2024-11-16T08:39:28,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/9cd7bde69cf64852bcfa4a4a1b457162 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/9cd7bde69cf64852bcfa4a4a1b457162 2024-11-16T08:39:28,363 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/9cd7bde69cf64852bcfa4a4a1b457162, entries=14, sequenceid=116, filesize=19.6 K 2024-11-16T08:39:28,363 WARN [Close-WAL-Writer-0 {}] 
util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:39:28,366 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for 41fb973b9df933ef140b595dbdc61fe8 in 60ms, sequenceid=116, compaction requested=true 2024-11-16T08:39:28,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:28,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 41fb973b9df933ef140b595dbdc61fe8:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T08:39:28,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:28,366 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T08:39:28,372 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40932 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T08:39:28,372 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1541): 41fb973b9df933ef140b595dbdc61fe8/info is initiating minor compaction (all files) 2024-11-16T08:39:28,373 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 41fb973b9df933ef140b595dbdc61fe8/info in TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8. 2024-11-16T08:39:28,373 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/fd7150450bbc47489ad1d46ad67ad842, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/0404fa91bcbe4c4f86e140161d1d2d51, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/9cd7bde69cf64852bcfa4a4a1b457162] into tmpdir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp, totalSize=40.0 K 2024-11-16T08:39:28,381 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting fd7150450bbc47489ad1d46ad67ad842, keycount=3, bloomtype=ROW, size=8.2 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1731746346151 2024-11-16T08:39:28,385 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0404fa91bcbe4c4f86e140161d1d2d51, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1731746368260 2024-11-16T08:39:28,385 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9cd7bde69cf64852bcfa4a4a1b457162, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1731746368274 2024-11-16T08:39:28,422 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 41fb973b9df933ef140b595dbdc61fe8#info#compaction#70 average throughput is 24.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T08:39:28,423 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/2a15c5dd81954bc5a87d586f226a2479 is 1080, key is row0062/info:/1731746346151/Put/seqid=0 2024-11-16T08:39:28,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741856_1032 (size=31106) 2024-11-16T08:39:28,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741856_1032 (size=31106) 2024-11-16T08:39:28,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:39:28,445 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/2a15c5dd81954bc5a87d586f226a2479 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/2a15c5dd81954bc5a87d586f226a2479 2024-11-16T08:39:28,454 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 41fb973b9df933ef140b595dbdc61fe8/info of 41fb973b9df933ef140b595dbdc61fe8 into 2a15c5dd81954bc5a87d586f226a2479(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T08:39:28,454 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:28,454 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8., storeName=41fb973b9df933ef140b595dbdc61fe8/info, priority=13, startTime=1731746368366; duration=0sec 2024-11-16T08:39:28,454 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:28,454 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 41fb973b9df933ef140b595dbdc61fe8:info 2024-11-16T08:39:29,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:29,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:39:30,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:30,337 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 41fb973b9df933ef140b595dbdc61fe8 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T08:39:30,341 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/60a0e6f6c27841c5b3c82d633c4c065c is 1080, key is row0086/info:/1731746368306/Put/seqid=0 2024-11-16T08:39:30,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741857_1033 (size=17896) 2024-11-16T08:39:30,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741857_1033 (size=17896) 2024-11-16T08:39:30,347 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/60a0e6f6c27841c5b3c82d633c4c065c 2024-11-16T08:39:30,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/60a0e6f6c27841c5b3c82d633c4c065c as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/60a0e6f6c27841c5b3c82d633c4c065c 2024-11-16T08:39:30,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/60a0e6f6c27841c5b3c82d633c4c065c, entries=12, sequenceid=132, filesize=17.5 K 2024-11-16T08:39:30,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:30,365 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=12.61 KB/12912 for 41fb973b9df933ef140b595dbdc61fe8 in 29ms, sequenceid=132, compaction requested=false 2024-11-16T08:39:30,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:30,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:30,367 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 41fb973b9df933ef140b595dbdc61fe8 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-16T08:39:30,371 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/99b7f0dbd9a24ab4a75eed460b3bcd5b is 1080, key is row0098/info:/1731746370338/Put/seqid=0 2024-11-16T08:39:30,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741858_1034 (size=20078) 2024-11-16T08:39:30,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741858_1034 (size=20078) 2024-11-16T08:39:30,376 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=149 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/99b7f0dbd9a24ab4a75eed460b3bcd5b 2024-11-16T08:39:30,382 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/99b7f0dbd9a24ab4a75eed460b3bcd5b as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/99b7f0dbd9a24ab4a75eed460b3bcd5b 2024-11-16T08:39:30,388 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/99b7f0dbd9a24ab4a75eed460b3bcd5b, entries=14, sequenceid=149, filesize=19.6 K 2024-11-16T08:39:30,389 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=10.51 KB/10760 for 41fb973b9df933ef140b595dbdc61fe8 in 22ms, sequenceid=149, compaction requested=true 2024-11-16T08:39:30,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:30,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 41fb973b9df933ef140b595dbdc61fe8:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T08:39:30,389 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T08:39:30,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:30,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:30,390 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 69080 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T08:39:30,390 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1541): 41fb973b9df933ef140b595dbdc61fe8/info is initiating minor compaction (all files) 2024-11-16T08:39:30,390 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 41fb973b9df933ef140b595dbdc61fe8 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T08:39:30,390 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 41fb973b9df933ef140b595dbdc61fe8/info in TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8. 
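[Annotation] The entries around this point record the memstore flush / minor-compaction cycle for region 41fb973b9df933ef140b595dbdc61fe8: each flush adds one HFile to the info store, and once three files are eligible the ExploringCompactionPolicy selects them for an "all files" minor compaction. The following is a minimal, illustrative sketch of how the same put -> flush -> compact pattern can be driven through the public HBase client API; the table name and the "info" family are taken from the log, while the connection settings, row keys and value sizes are assumptions and this is not the actual TestLogRolling test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushCompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      // Write a batch of ~1 KB cells, similar in shape to the row0062..row0130 puts above.
      for (int i = 0; i < 20; i++) {
        Put p = new Put(Bytes.toBytes(String.format("row%04d", i)));
        p.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), new byte[1024]);
        table.put(p);
      }
      // Force a memstore flush; each flush produces one new HFile in the store,
      // which is what eventually makes the store eligible for the minor compactions logged here.
      admin.flush(tn);
      // Optionally request a compaction explicitly instead of waiting for CompactSplit.
      admin.compact(tn);
    }
  }
}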
2024-11-16T08:39:30,390 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/2a15c5dd81954bc5a87d586f226a2479, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/60a0e6f6c27841c5b3c82d633c4c065c, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/99b7f0dbd9a24ab4a75eed460b3bcd5b] into tmpdir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp, totalSize=67.5 K 2024-11-16T08:39:30,391 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2a15c5dd81954bc5a87d586f226a2479, keycount=24, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1731746346151 2024-11-16T08:39:30,391 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting 60a0e6f6c27841c5b3c82d633c4c065c, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1731746368306 2024-11-16T08:39:30,391 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting 99b7f0dbd9a24ab4a75eed460b3bcd5b, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=149, earliestPutTs=1731746370338 2024-11-16T08:39:30,409 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/cc7036ca87cb41899617b1b9734a954a is 1080, key is row0112/info:/1731746370368/Put/seqid=0 2024-11-16T08:39:30,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741859_1035 (size=16828) 2024-11-16T08:39:30,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741859_1035 (size=16828) 2024-11-16T08:39:30,429 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/cc7036ca87cb41899617b1b9734a954a 2024-11-16T08:39:30,431 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 41fb973b9df933ef140b595dbdc61fe8#info#compaction#74 average throughput is 17.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T08:39:30,431 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/348c55e3dcd743e4992328fe5386c199 is 1080, key is row0062/info:/1731746346151/Put/seqid=0 2024-11-16T08:39:30,436 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/cc7036ca87cb41899617b1b9734a954a as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/cc7036ca87cb41899617b1b9734a954a 2024-11-16T08:39:30,442 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/cc7036ca87cb41899617b1b9734a954a, entries=11, sequenceid=163, filesize=16.4 K 2024-11-16T08:39:30,443 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=6.30 KB/6456 for 41fb973b9df933ef140b595dbdc61fe8 in 53ms, sequenceid=163, compaction requested=false 2024-11-16T08:39:30,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:30,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:30,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741860_1036 (size=59266) 2024-11-16T08:39:30,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741860_1036 (size=59266) 2024-11-16T08:39:30,456 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/348c55e3dcd743e4992328fe5386c199 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/348c55e3dcd743e4992328fe5386c199 2024-11-16T08:39:30,461 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 41fb973b9df933ef140b595dbdc61fe8/info of 41fb973b9df933ef140b595dbdc61fe8 into 348c55e3dcd743e4992328fe5386c199(size=57.9 K), total size for store is 74.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T08:39:30,461 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:30,461 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8., storeName=41fb973b9df933ef140b595dbdc61fe8/info, priority=13, startTime=1731746370389; duration=0sec 2024-11-16T08:39:30,462 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:30,462 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 41fb973b9df933ef140b595dbdc61fe8:info 2024-11-16T08:39:31,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:31,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:32,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:39:32,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:32,420 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 41fb973b9df933ef140b595dbdc61fe8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T08:39:32,424 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/2a2b4081366b4ad9ae81f4b74707dbc3 is 1080, key is row0123/info:/1731746370391/Put/seqid=0 2024-11-16T08:39:32,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741861_1037 (size=12516) 2024-11-16T08:39:32,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741861_1037 (size=12516) 2024-11-16T08:39:32,437 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/2a2b4081366b4ad9ae81f4b74707dbc3 2024-11-16T08:39:32,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/2a2b4081366b4ad9ae81f4b74707dbc3 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/2a2b4081366b4ad9ae81f4b74707dbc3 2024-11-16T08:39:32,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:32,458 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/2a2b4081366b4ad9ae81f4b74707dbc3, entries=7, sequenceid=174, filesize=12.2 K 2024-11-16T08:39:32,460 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 41fb973b9df933ef140b595dbdc61fe8 in 39ms, sequenceid=174, compaction requested=true 2024-11-16T08:39:32,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:32,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 41fb973b9df933ef140b595dbdc61fe8:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T08:39:32,460 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T08:39:32,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:32,461 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 88610 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T08:39:32,461 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1541): 41fb973b9df933ef140b595dbdc61fe8/info is initiating minor compaction (all files) 2024-11-16T08:39:32,461 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 41fb973b9df933ef140b595dbdc61fe8/info in TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8. 
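[Annotation] The recurring Close-WAL-Writer-0 WARN entries ("Failed invocation ... InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed") come from RecoverLeaseFSUtils repeatedly probing whether an old WAL file is closed after the DFS client that wrote it has already been shut down; because the probe is made reflectively, the underlying IOException surfaces wrapped in an InvocationTargetException. Below is a minimal sketch, under stated assumptions, of that reflective isFileClosed probe pattern; the class and method names here are illustrative and this is not the actual RecoverLeaseFSUtils implementation.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {
  // Reflectively calls DistributedFileSystem#isFileClosed(Path) so the caller also
  // compiles against FileSystem implementations that lack the method.
  static boolean isFileClosed(FileSystem fs, Path p) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, p);
    } catch (NoSuchMethodException e) {
      return false; // not an HDFS client; nothing to probe
    } catch (InvocationTargetException e) {
      // The real failure (e.g. IOException: Filesystem closed, as in the WARNs above)
      // arrives wrapped here, which is why the log shows InvocationTargetException
      // with the IOException as its cause.
      return false;
    } catch (IllegalAccessException e) {
      return false;
    }
  }
}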
2024-11-16T08:39:32,461 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/348c55e3dcd743e4992328fe5386c199, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/cc7036ca87cb41899617b1b9734a954a, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/2a2b4081366b4ad9ae81f4b74707dbc3] into tmpdir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp, totalSize=86.5 K 2024-11-16T08:39:32,462 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting 348c55e3dcd743e4992328fe5386c199, keycount=50, bloomtype=ROW, size=57.9 K, encoding=NONE, compression=NONE, seqNum=149, earliestPutTs=1731746346151 2024-11-16T08:39:32,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:32,462 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 41fb973b9df933ef140b595dbdc61fe8 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-16T08:39:32,462 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting cc7036ca87cb41899617b1b9734a954a, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1731746370368 2024-11-16T08:39:32,463 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2a2b4081366b4ad9ae81f4b74707dbc3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1731746370391 2024-11-16T08:39:32,474 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/d23b28b39cda4b13bd71cf583e51d865 is 1080, key is row0130/info:/1731746372421/Put/seqid=0 2024-11-16T08:39:32,485 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 41fb973b9df933ef140b595dbdc61fe8#info#compaction#77 average throughput is 23.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T08:39:32,486 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/bbd1d55de1c44da49cff49f2557eb537 is 1080, key is row0062/info:/1731746346151/Put/seqid=0 2024-11-16T08:39:32,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39489 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=41fb973b9df933ef140b595dbdc61fe8, server=c27dd56784bd,39489,1731746332792 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-16T08:39:32,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39489 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:39370 deadline: 1731746382508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=41fb973b9df933ef140b595dbdc61fe8, server=c27dd56784bd,39489,1731746332792 2024-11-16T08:39:32,509 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8., hostname=c27dd56784bd,39489,1731746332792, seqNum=89 , the old value is region=TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8., hostname=c27dd56784bd,39489,1731746332792, seqNum=89, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=41fb973b9df933ef140b595dbdc61fe8, server=c27dd56784bd,39489,1731746332792 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T08:39:32,509 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8., 
hostname=c27dd56784bd,39489,1731746332792, seqNum=89 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=41fb973b9df933ef140b595dbdc61fe8, server=c27dd56784bd,39489,1731746332792 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-16T08:39:32,509 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8., hostname=c27dd56784bd,39489,1731746332792, seqNum=89 because the exception is null or not the one we care about 2024-11-16T08:39:32,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741862_1038 (size=21156) 2024-11-16T08:39:32,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741862_1038 (size=21156) 2024-11-16T08:39:32,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/d23b28b39cda4b13bd71cf583e51d865 2024-11-16T08:39:32,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741863_1039 (size=78909) 2024-11-16T08:39:32,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741863_1039 (size=78909) 2024-11-16T08:39:32,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/d23b28b39cda4b13bd71cf583e51d865 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/d23b28b39cda4b13bd71cf583e51d865 2024-11-16T08:39:32,520 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/bbd1d55de1c44da49cff49f2557eb537 as 
hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/bbd1d55de1c44da49cff49f2557eb537 2024-11-16T08:39:32,526 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 41fb973b9df933ef140b595dbdc61fe8/info of 41fb973b9df933ef140b595dbdc61fe8 into bbd1d55de1c44da49cff49f2557eb537(size=77.1 K), total size for store is 77.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T08:39:32,526 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:32,526 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8., storeName=41fb973b9df933ef140b595dbdc61fe8/info, priority=13, startTime=1731746372460; duration=0sec 2024-11-16T08:39:32,526 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:32,526 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 41fb973b9df933ef140b595dbdc61fe8:info 2024-11-16T08:39:32,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/d23b28b39cda4b13bd71cf583e51d865, entries=15, sequenceid=192, filesize=20.7 K 2024-11-16T08:39:32,533 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=14.71 KB/15064 for 41fb973b9df933ef140b595dbdc61fe8 in 71ms, sequenceid=192, compaction requested=false 2024-11-16T08:39:32,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:33,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:33,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:34,350 INFO [master/c27dd56784bd:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 
2024-11-16T08:39:34,350 INFO [master/c27dd56784bd:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-16T08:39:34,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:34,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:35,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:35,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:36,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:36,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:37,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:37,450 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:38,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:38,451 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:38,757 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-16T08:39:39,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:39,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:40,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:40,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:41,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:41,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:42,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:42,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:42,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:42,547 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 41fb973b9df933ef140b595dbdc61fe8 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-16T08:39:42,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/85c0cb44c1554e6fa2958fcfeaee6642 is 1080, key is row0145/info:/1731746372464/Put/seqid=0 2024-11-16T08:39:42,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741864_1040 (size=21156) 2024-11-16T08:39:42,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741864_1040 (size=21156) 2024-11-16T08:39:42,562 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/85c0cb44c1554e6fa2958fcfeaee6642 2024-11-16T08:39:42,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/85c0cb44c1554e6fa2958fcfeaee6642 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/85c0cb44c1554e6fa2958fcfeaee6642 2024-11-16T08:39:42,574 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/85c0cb44c1554e6fa2958fcfeaee6642, entries=15, sequenceid=211, filesize=20.7 K 2024-11-16T08:39:42,574 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=1.05 KB/1076 for 41fb973b9df933ef140b595dbdc61fe8 in 27ms, sequenceid=211, compaction requested=true 2024-11-16T08:39:42,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:42,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 41fb973b9df933ef140b595dbdc61fe8:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T08:39:42,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:42,575 DEBUG [RS:0;c27dd56784bd:39489-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T08:39:42,576 DEBUG [RS:0;c27dd56784bd:39489-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 121221 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T08:39:42,576 DEBUG [RS:0;c27dd56784bd:39489-longCompactions-0 {}] regionserver.HStore(1541): 41fb973b9df933ef140b595dbdc61fe8/info is initiating minor compaction (all files) 2024-11-16T08:39:42,576 INFO [RS:0;c27dd56784bd:39489-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 41fb973b9df933ef140b595dbdc61fe8/info in TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8. 2024-11-16T08:39:42,576 INFO [RS:0;c27dd56784bd:39489-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/bbd1d55de1c44da49cff49f2557eb537, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/d23b28b39cda4b13bd71cf583e51d865, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/85c0cb44c1554e6fa2958fcfeaee6642] into tmpdir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp, totalSize=118.4 K 2024-11-16T08:39:42,577 DEBUG [RS:0;c27dd56784bd:39489-longCompactions-0 {}] compactions.Compactor(225): Compacting bbd1d55de1c44da49cff49f2557eb537, keycount=68, bloomtype=ROW, size=77.1 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1731746346151 2024-11-16T08:39:42,577 DEBUG [RS:0;c27dd56784bd:39489-longCompactions-0 {}] compactions.Compactor(225): Compacting d23b28b39cda4b13bd71cf583e51d865, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1731746372421 2024-11-16T08:39:42,577 DEBUG [RS:0;c27dd56784bd:39489-longCompactions-0 {}] compactions.Compactor(225): Compacting 85c0cb44c1554e6fa2958fcfeaee6642, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1731746372464 2024-11-16T08:39:42,589 INFO [RS:0;c27dd56784bd:39489-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 41fb973b9df933ef140b595dbdc61fe8#info#compaction#79 average throughput is 33.52 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T08:39:42,590 DEBUG [RS:0;c27dd56784bd:39489-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/05a8d3d30060426e88ce7925857aab9b is 1080, key is row0062/info:/1731746346151/Put/seqid=0 2024-11-16T08:39:42,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741865_1041 (size=111359) 2024-11-16T08:39:42,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741865_1041 (size=111359) 2024-11-16T08:39:42,598 DEBUG [RS:0;c27dd56784bd:39489-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/05a8d3d30060426e88ce7925857aab9b as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/05a8d3d30060426e88ce7925857aab9b 2024-11-16T08:39:42,603 INFO [RS:0;c27dd56784bd:39489-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 41fb973b9df933ef140b595dbdc61fe8/info of 41fb973b9df933ef140b595dbdc61fe8 into 05a8d3d30060426e88ce7925857aab9b(size=108.7 K), total size for store is 108.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T08:39:42,603 DEBUG [RS:0;c27dd56784bd:39489-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:42,603 INFO [RS:0;c27dd56784bd:39489-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8., storeName=41fb973b9df933ef140b595dbdc61fe8/info, priority=13, startTime=1731746382575; duration=0sec 2024-11-16T08:39:42,604 DEBUG [RS:0;c27dd56784bd:39489-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:42,604 DEBUG [RS:0;c27dd56784bd:39489-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 41fb973b9df933ef140b595dbdc61fe8:info 2024-11-16T08:39:43,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:43,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:44,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:44,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:44,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:44,570 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 41fb973b9df933ef140b595dbdc61fe8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T08:39:44,574 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/54744597680c43b2be3c522fac7d9c17 is 1080, key is row0160/info:/1731746382550/Put/seqid=0 2024-11-16T08:39:44,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741866_1042 (size=12516) 2024-11-16T08:39:44,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741866_1042 (size=12516) 2024-11-16T08:39:44,580 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=222 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/54744597680c43b2be3c522fac7d9c17 2024-11-16T08:39:44,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/54744597680c43b2be3c522fac7d9c17 as 
hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/54744597680c43b2be3c522fac7d9c17 2024-11-16T08:39:44,591 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/54744597680c43b2be3c522fac7d9c17, entries=7, sequenceid=222, filesize=12.2 K 2024-11-16T08:39:44,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 41fb973b9df933ef140b595dbdc61fe8 in 21ms, sequenceid=222, compaction requested=false 2024-11-16T08:39:44,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:44,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:44,593 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 41fb973b9df933ef140b595dbdc61fe8 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T08:39:44,597 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/4263854d2fef497d875fbb7861b199a3 is 1080, key is row0167/info:/1731746384571/Put/seqid=0 2024-11-16T08:39:44,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741867_1043 (size=16828) 2024-11-16T08:39:44,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741867_1043 (size=16828) 2024-11-16T08:39:44,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/4263854d2fef497d875fbb7861b199a3 2024-11-16T08:39:44,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/4263854d2fef497d875fbb7861b199a3 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/4263854d2fef497d875fbb7861b199a3 2024-11-16T08:39:44,614 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/4263854d2fef497d875fbb7861b199a3, entries=11, sequenceid=236, filesize=16.4 K 2024-11-16T08:39:44,615 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=11.56 KB/11836 for 41fb973b9df933ef140b595dbdc61fe8 in 22ms, sequenceid=236, compaction requested=true 2024-11-16T08:39:44,615 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:44,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 41fb973b9df933ef140b595dbdc61fe8:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T08:39:44,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:44,615 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T08:39:44,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:44,616 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 41fb973b9df933ef140b595dbdc61fe8 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-16T08:39:44,616 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 140703 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T08:39:44,616 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1541): 41fb973b9df933ef140b595dbdc61fe8/info is initiating minor compaction (all files) 2024-11-16T08:39:44,616 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 41fb973b9df933ef140b595dbdc61fe8/info in TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8. 2024-11-16T08:39:44,616 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/05a8d3d30060426e88ce7925857aab9b, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/54744597680c43b2be3c522fac7d9c17, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/4263854d2fef497d875fbb7861b199a3] into tmpdir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp, totalSize=137.4 K 2024-11-16T08:39:44,617 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting 05a8d3d30060426e88ce7925857aab9b, keycount=98, bloomtype=ROW, size=108.7 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1731746346151 2024-11-16T08:39:44,617 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting 54744597680c43b2be3c522fac7d9c17, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1731746382550 2024-11-16T08:39:44,617 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4263854d2fef497d875fbb7861b199a3, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1731746384571 2024-11-16T08:39:44,619 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/a4d12b163b2947b69b7aa6f6e2aca5d1 is 1080, key is row0178/info:/1731746384594/Put/seqid=0 2024-11-16T08:39:44,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741868_1044 (size=17906) 2024-11-16T08:39:44,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741868_1044 (size=17906) 2024-11-16T08:39:44,625 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/a4d12b163b2947b69b7aa6f6e2aca5d1 2024-11-16T08:39:44,629 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 41fb973b9df933ef140b595dbdc61fe8#info#compaction#83 average throughput is 39.68 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T08:39:44,629 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/7ebe4d93110f49fe9a192ad1419b4070 is 1080, key is row0062/info:/1731746346151/Put/seqid=0 2024-11-16T08:39:44,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/a4d12b163b2947b69b7aa6f6e2aca5d1 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/a4d12b163b2947b69b7aa6f6e2aca5d1 2024-11-16T08:39:44,635 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/a4d12b163b2947b69b7aa6f6e2aca5d1, entries=12, sequenceid=251, filesize=17.5 K 2024-11-16T08:39:44,636 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=3.15 KB/3228 for 41fb973b9df933ef140b595dbdc61fe8 in 20ms, sequenceid=251, compaction requested=false 2024-11-16T08:39:44,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:44,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741869_1045 (size=130997) 2024-11-16T08:39:44,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741869_1045 (size=130997) 2024-11-16T08:39:44,643 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/7ebe4d93110f49fe9a192ad1419b4070 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/7ebe4d93110f49fe9a192ad1419b4070 2024-11-16T08:39:44,648 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 41fb973b9df933ef140b595dbdc61fe8/info of 41fb973b9df933ef140b595dbdc61fe8 into 7ebe4d93110f49fe9a192ad1419b4070(size=127.9 K), total size for store is 145.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T08:39:44,648 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:44,648 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8., storeName=41fb973b9df933ef140b595dbdc61fe8/info, priority=13, startTime=1731746384615; duration=0sec 2024-11-16T08:39:44,648 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:44,648 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 41fb973b9df933ef140b595dbdc61fe8:info 2024-11-16T08:39:45,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:45,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:46,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:46,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:46,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:46,629 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 41fb973b9df933ef140b595dbdc61fe8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T08:39:46,633 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/037d180a2423428f8eb6960a5c900978 is 1080, key is row0190/info:/1731746384617/Put/seqid=0 2024-11-16T08:39:46,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741870_1046 (size=12520) 2024-11-16T08:39:46,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741870_1046 (size=12520) 2024-11-16T08:39:46,642 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/037d180a2423428f8eb6960a5c900978 2024-11-16T08:39:46,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/037d180a2423428f8eb6960a5c900978 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/037d180a2423428f8eb6960a5c900978 2024-11-16T08:39:46,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/037d180a2423428f8eb6960a5c900978, entries=7, sequenceid=262, filesize=12.2 K 2024-11-16T08:39:46,653 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 41fb973b9df933ef140b595dbdc61fe8 in 24ms, sequenceid=262, compaction requested=true 2024-11-16T08:39:46,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:46,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 41fb973b9df933ef140b595dbdc61fe8:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T08:39:46,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:46,653 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T08:39:46,654 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 161423 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T08:39:46,654 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1541): 41fb973b9df933ef140b595dbdc61fe8/info is initiating minor compaction (all files) 2024-11-16T08:39:46,654 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 41fb973b9df933ef140b595dbdc61fe8/info in TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8. 2024-11-16T08:39:46,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:46,654 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/7ebe4d93110f49fe9a192ad1419b4070, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/a4d12b163b2947b69b7aa6f6e2aca5d1, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/037d180a2423428f8eb6960a5c900978] into tmpdir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp, totalSize=157.6 K 2024-11-16T08:39:46,654 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 41fb973b9df933ef140b595dbdc61fe8 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-16T08:39:46,655 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7ebe4d93110f49fe9a192ad1419b4070, keycount=116, bloomtype=ROW, size=127.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1731746346151 2024-11-16T08:39:46,655 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting a4d12b163b2947b69b7aa6f6e2aca5d1, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731746384594 2024-11-16T08:39:46,656 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting 037d180a2423428f8eb6960a5c900978, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1731746384617 2024-11-16T08:39:46,658 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/919b0f25571d43b0aa1205dc02865bc1 is 1080, key is row0197/info:/1731746386630/Put/seqid=0 2024-11-16T08:39:46,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to 
blk_1073741871_1047 (size=20092) 2024-11-16T08:39:46,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741871_1047 (size=20092) 2024-11-16T08:39:46,669 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 41fb973b9df933ef140b595dbdc61fe8#info#compaction#86 average throughput is 69.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T08:39:46,669 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/d2879056844b4dcfa1dd71d558612b55 is 1080, key is row0062/info:/1731746346151/Put/seqid=0 2024-11-16T08:39:46,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741872_1048 (size=151658) 2024-11-16T08:39:46,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741872_1048 (size=151658) 2024-11-16T08:39:46,680 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/d2879056844b4dcfa1dd71d558612b55 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/d2879056844b4dcfa1dd71d558612b55 2024-11-16T08:39:46,686 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 41fb973b9df933ef140b595dbdc61fe8/info of 41fb973b9df933ef140b595dbdc61fe8 into d2879056844b4dcfa1dd71d558612b55(size=148.1 K), total size for store is 148.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
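Editor's note: the recurring "Failed invocation ... InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" WARNs from the Close-WAL-Writer-0 thread in this log appear to come from RecoverLeaseFSUtils probing isFileClosed() through reflection while the test's DFS client has already been shut down (the stack trace shows Method.invoke wrapping the IOException thrown by DFSClient.checkOpen). The sketch below illustrates only that reflective-probe pattern; it is not the actual RecoverLeaseFSUtils code, and the probeIsFileClosed helper, its class name, and its error handling are assumptions made for illustration.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Illustration of a reflective isFileClosed() probe, similar in shape to the
 * pattern behind the "Failed invocation" WARNs above. Not the real
 * RecoverLeaseFSUtils implementation.
 */
public final class IsFileClosedProbe {

  /**
   * Returns true if HDFS reports the file as closed, false if the probe could
   * not be made (method missing, or the invocation itself failed).
   */
  static boolean probeIsFileClosed(FileSystem fs, Path path) {
    try {
      // isFileClosed(Path) exists on DistributedFileSystem; it is looked up
      // reflectively here because a generic FileSystem may not provide it.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // no usable probe; caller falls back to retrying recoverLease
    } catch (InvocationTargetException e) {
      // The real failure is the cause, e.g. java.io.IOException: Filesystem
      // closed when the underlying DFSClient has already been shut down --
      // which is how the WARNs above render it.
      System.err.println("Failed invocation for " + path + ": " + e.getCause());
      return false;
    }
  }

  private IsFileClosedProbe() {}
}

Because the failure is only logged and the probe effectively reports "not closed yet", the caller keeps retrying, which would explain why the same trace repeats roughly once per second per WAL file throughout this section.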
2024-11-16T08:39:46,686 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:46,686 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8., storeName=41fb973b9df933ef140b595dbdc61fe8/info, priority=13, startTime=1731746386653; duration=0sec 2024-11-16T08:39:46,686 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:46,686 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 41fb973b9df933ef140b595dbdc61fe8:info 2024-11-16T08:39:47,067 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/919b0f25571d43b0aa1205dc02865bc1 2024-11-16T08:39:47,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/919b0f25571d43b0aa1205dc02865bc1 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/919b0f25571d43b0aa1205dc02865bc1 2024-11-16T08:39:47,083 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/919b0f25571d43b0aa1205dc02865bc1, entries=14, sequenceid=279, filesize=19.6 K 2024-11-16T08:39:47,085 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=14.71 KB/15064 for 41fb973b9df933ef140b595dbdc61fe8 in 431ms, sequenceid=279, compaction requested=false 2024-11-16T08:39:47,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:47,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:47,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:48,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:48,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:48,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:48,689 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 41fb973b9df933ef140b595dbdc61fe8 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-16T08:39:48,696 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/cd3999e21523448c8fd618fae2927754 is 1080, key is row0211/info:/1731746386656/Put/seqid=0 2024-11-16T08:39:48,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741873_1049 (size=21171) 2024-11-16T08:39:48,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741873_1049 (size=21171) 2024-11-16T08:39:48,703 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/cd3999e21523448c8fd618fae2927754 2024-11-16T08:39:48,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/cd3999e21523448c8fd618fae2927754 as 
hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/cd3999e21523448c8fd618fae2927754 2024-11-16T08:39:48,717 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/cd3999e21523448c8fd618fae2927754, entries=15, sequenceid=298, filesize=20.7 K 2024-11-16T08:39:48,718 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=11.56 KB/11836 for 41fb973b9df933ef140b595dbdc61fe8 in 29ms, sequenceid=298, compaction requested=true 2024-11-16T08:39:48,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:48,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 41fb973b9df933ef140b595dbdc61fe8:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T08:39:48,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:48,719 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T08:39:48,720 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 192921 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T08:39:48,720 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1541): 41fb973b9df933ef140b595dbdc61fe8/info is initiating minor compaction (all files) 2024-11-16T08:39:48,720 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 41fb973b9df933ef140b595dbdc61fe8/info in TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8. 
2024-11-16T08:39:48,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:48,720 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/d2879056844b4dcfa1dd71d558612b55, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/919b0f25571d43b0aa1205dc02865bc1, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/cd3999e21523448c8fd618fae2927754] into tmpdir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp, totalSize=188.4 K 2024-11-16T08:39:48,720 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 41fb973b9df933ef140b595dbdc61fe8 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-16T08:39:48,721 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting d2879056844b4dcfa1dd71d558612b55, keycount=135, bloomtype=ROW, size=148.1 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1731746346151 2024-11-16T08:39:48,721 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting 919b0f25571d43b0aa1205dc02865bc1, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1731746386630 2024-11-16T08:39:48,721 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting cd3999e21523448c8fd618fae2927754, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1731746386656 2024-11-16T08:39:48,724 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/bff5e61a12c246758bcc0cb55c13ec2d is 1080, key is row0226/info:/1731746388692/Put/seqid=0 2024-11-16T08:39:48,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741874_1050 (size=19013) 2024-11-16T08:39:48,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741874_1050 (size=19013) 2024-11-16T08:39:48,730 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/bff5e61a12c246758bcc0cb55c13ec2d 2024-11-16T08:39:48,733 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 41fb973b9df933ef140b595dbdc61fe8#info#compaction#89 average throughput is 56.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T08:39:48,734 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/1149c2a7ee7c4416ab081c2354e20853 is 1080, key is row0062/info:/1731746346151/Put/seqid=0 2024-11-16T08:39:48,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/bff5e61a12c246758bcc0cb55c13ec2d as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/bff5e61a12c246758bcc0cb55c13ec2d 2024-11-16T08:39:48,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741875_1051 (size=183059) 2024-11-16T08:39:48,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741875_1051 (size=183059) 2024-11-16T08:39:48,741 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/bff5e61a12c246758bcc0cb55c13ec2d, entries=13, sequenceid=314, filesize=18.6 K 2024-11-16T08:39:48,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=10.51 KB/10760 for 41fb973b9df933ef140b595dbdc61fe8 in 22ms, sequenceid=314, compaction requested=false 2024-11-16T08:39:48,742 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/1149c2a7ee7c4416ab081c2354e20853 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/1149c2a7ee7c4416ab081c2354e20853 2024-11-16T08:39:48,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:48,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39489 {}] regionserver.HRegion(8855): Flush requested on 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:48,743 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 41fb973b9df933ef140b595dbdc61fe8 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-16T08:39:48,746 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/bb6c6021eea84a53b21f8ef9cb8688dd is 1080, key is row0239/info:/1731746388721/Put/seqid=0 2024-11-16T08:39:48,749 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 41fb973b9df933ef140b595dbdc61fe8/info of 41fb973b9df933ef140b595dbdc61fe8 into 
1149c2a7ee7c4416ab081c2354e20853(size=178.8 K), total size for store is 197.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T08:39:48,749 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:48,749 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8., storeName=41fb973b9df933ef140b595dbdc61fe8/info, priority=13, startTime=1731746388719; duration=0sec 2024-11-16T08:39:48,749 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:48,749 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 41fb973b9df933ef140b595dbdc61fe8:info 2024-11-16T08:39:48,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741876_1052 (size=16839) 2024-11-16T08:39:48,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741876_1052 (size=16839) 2024-11-16T08:39:48,751 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/bb6c6021eea84a53b21f8ef9cb8688dd 2024-11-16T08:39:48,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/bb6c6021eea84a53b21f8ef9cb8688dd as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/bb6c6021eea84a53b21f8ef9cb8688dd 2024-11-16T08:39:48,762 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/bb6c6021eea84a53b21f8ef9cb8688dd, entries=11, sequenceid=328, filesize=16.4 K 2024-11-16T08:39:48,763 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=7.36 KB/7532 for 41fb973b9df933ef140b595dbdc61fe8 in 20ms, sequenceid=328, compaction requested=true 2024-11-16T08:39:48,763 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:48,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 41fb973b9df933ef140b595dbdc61fe8:info, priority=-2147483648, current under compaction store size is 1 2024-11-16T08:39:48,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:48,763 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-16T08:39:48,764 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 218911 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-16T08:39:48,764 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1541): 41fb973b9df933ef140b595dbdc61fe8/info is initiating minor compaction (all files) 2024-11-16T08:39:48,765 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 41fb973b9df933ef140b595dbdc61fe8/info in TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8. 2024-11-16T08:39:48,765 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/1149c2a7ee7c4416ab081c2354e20853, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/bff5e61a12c246758bcc0cb55c13ec2d, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/bb6c6021eea84a53b21f8ef9cb8688dd] into tmpdir=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp, totalSize=213.8 K 2024-11-16T08:39:48,765 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1149c2a7ee7c4416ab081c2354e20853, keycount=164, bloomtype=ROW, size=178.8 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1731746346151 2024-11-16T08:39:48,765 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting bff5e61a12c246758bcc0cb55c13ec2d, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1731746388692 2024-11-16T08:39:48,766 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] compactions.Compactor(225): Compacting bb6c6021eea84a53b21f8ef9cb8688dd, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1731746388721 2024-11-16T08:39:48,776 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 41fb973b9df933ef140b595dbdc61fe8#info#compaction#91 average throughput is 64.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-16T08:39:48,777 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/bea155f553874397872c865bacd30f1b is 1080, key is row0062/info:/1731746346151/Put/seqid=0 2024-11-16T08:39:48,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741877_1053 (size=209134) 2024-11-16T08:39:48,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741877_1053 (size=209134) 2024-11-16T08:39:48,783 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/bea155f553874397872c865bacd30f1b as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/bea155f553874397872c865bacd30f1b 2024-11-16T08:39:48,789 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 41fb973b9df933ef140b595dbdc61fe8/info of 41fb973b9df933ef140b595dbdc61fe8 into bea155f553874397872c865bacd30f1b(size=204.2 K), total size for store is 204.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-16T08:39:48,789 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:48,789 INFO [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8., storeName=41fb973b9df933ef140b595dbdc61fe8/info, priority=13, startTime=1731746388763; duration=0sec 2024-11-16T08:39:48,789 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-16T08:39:48,789 DEBUG [RS:0;c27dd56784bd:39489-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 41fb973b9df933ef140b595dbdc61fe8:info 2024-11-16T08:39:49,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:49,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:50,028 DEBUG [master/c27dd56784bd:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-16T08:39:50,028 DEBUG [master/c27dd56784bd:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=73, reuseRatio=89.02% 2024-11-16T08:39:50,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:39:50,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:39:50,756 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-16T08:39:50,756 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C39489%2C1731746332792.1731746390756 2024-11-16T08:39:50,772 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:50,772 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:50,772 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:50,772 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:50,772 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:50,772 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/WALs/c27dd56784bd,39489,1731746332792/c27dd56784bd%2C39489%2C1731746332792.1731746333402 with entries=316, filesize=309.65 KB; new WAL /user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/WALs/c27dd56784bd,39489,1731746332792/c27dd56784bd%2C39489%2C1731746332792.1731746390756 2024-11-16T08:39:50,773 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42645:42645),(127.0.0.1/127.0.0.1:41793:41793)] 2024-11-16T08:39:50,773 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/WALs/c27dd56784bd,39489,1731746332792/c27dd56784bd%2C39489%2C1731746332792.1731746333402 is not closed yet, will try archiving it next time 2024-11-16T08:39:50,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741833_1009 (size=317093) 2024-11-16T08:39:50,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741833_1009 (size=317093) 2024-11-16T08:39:50,798 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 41fb973b9df933ef140b595dbdc61fe8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-16T08:39:50,801 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/a7ebcf0c5a7c46aaa60242b27332719e is 1080, key is row0250/info:/1731746388744/Put/seqid=0 2024-11-16T08:39:50,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741879_1055 (size=12523) 2024-11-16T08:39:50,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741879_1055 (size=12523) 2024-11-16T08:39:50,806 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/a7ebcf0c5a7c46aaa60242b27332719e 2024-11-16T08:39:50,811 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/.tmp/info/a7ebcf0c5a7c46aaa60242b27332719e as 
hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/a7ebcf0c5a7c46aaa60242b27332719e 2024-11-16T08:39:50,816 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/a7ebcf0c5a7c46aaa60242b27332719e, entries=7, sequenceid=340, filesize=12.2 K 2024-11-16T08:39:50,817 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 41fb973b9df933ef140b595dbdc61fe8 in 20ms, sequenceid=340, compaction requested=false 2024-11-16T08:39:50,817 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 41fb973b9df933ef140b595dbdc61fe8: 2024-11-16T08:39:50,817 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for f567595c518fa426d00b5514ba689075: 2024-11-16T08:39:50,817 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-16T08:39:50,822 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/.tmp/info/5578b895aeb847ecb1121da8be037b8e is 193, key is TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8./info:regioninfo/1731746357155/Put/seqid=0 2024-11-16T08:39:50,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741880_1056 (size=6223) 2024-11-16T08:39:50,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741880_1056 (size=6223) 2024-11-16T08:39:50,826 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/.tmp/info/5578b895aeb847ecb1121da8be037b8e 2024-11-16T08:39:50,831 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/.tmp/info/5578b895aeb847ecb1121da8be037b8e as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/info/5578b895aeb847ecb1121da8be037b8e 2024-11-16T08:39:50,836 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/info/5578b895aeb847ecb1121da8be037b8e, entries=5, sequenceid=21, filesize=6.1 K 2024-11-16T08:39:50,837 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 20ms, sequenceid=21, compaction requested=false 2024-11-16T08:39:50,837 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-16T08:39:50,837 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C39489%2C1731746332792.1731746390837 2024-11-16T08:39:50,841 INFO [sync.0 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:50,841 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:50,841 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:50,841 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:50,841 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:50,841 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/WALs/c27dd56784bd,39489,1731746332792/c27dd56784bd%2C39489%2C1731746332792.1731746390756 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/WALs/c27dd56784bd,39489,1731746332792/c27dd56784bd%2C39489%2C1731746332792.1731746390837 2024-11-16T08:39:50,842 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41793:41793),(127.0.0.1/127.0.0.1:42645:42645)] 2024-11-16T08:39:50,842 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/WALs/c27dd56784bd,39489,1731746332792/c27dd56784bd%2C39489%2C1731746332792.1731746390756 is not closed yet, will try archiving it next time 2024-11-16T08:39:50,842 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/WALs/c27dd56784bd,39489,1731746332792/c27dd56784bd%2C39489%2C1731746332792.1731746333402 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/oldWALs/c27dd56784bd%2C39489%2C1731746332792.1731746333402 2024-11-16T08:39:50,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741878_1054 (size=731) 2024-11-16T08:39:50,843 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-16T08:39:50,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741878_1054 (size=731) 2024-11-16T08:39:50,844 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/WALs/c27dd56784bd,39489,1731746332792/c27dd56784bd%2C39489%2C1731746332792.1731746390756 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/oldWALs/c27dd56784bd%2C39489%2C1731746332792.1731746390756 2024-11-16T08:39:50,943 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T08:39:50,943 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T08:39:50,943 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:39:50,943 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:39:50,943 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:39:50,943 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-16T08:39:50,944 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T08:39:50,944 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1536539636, stopped=false 2024-11-16T08:39:50,944 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c27dd56784bd,41263,1731746332596 2024-11-16T08:39:51,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39489-0x10142cbfb4f0001, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T08:39:51,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T08:39:51,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39489-0x10142cbfb4f0001, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:39:51,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:39:51,004 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T08:39:51,004 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T08:39:51,004 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:39:51,004 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:39:51,004 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c27dd56784bd,39489,1731746332792' ***** 2024-11-16T08:39:51,004 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T08:39:51,005 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39489-0x10142cbfb4f0001, quorum=127.0.0.1:62779, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:39:51,005 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:39:51,005 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T08:39:51,005 INFO [RS:0;c27dd56784bd:39489 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T08:39:51,005 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T08:39:51,005 INFO [RS:0;c27dd56784bd:39489 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-16T08:39:51,005 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.HRegionServer(3091): Received CLOSE for 41fb973b9df933ef140b595dbdc61fe8 2024-11-16T08:39:51,005 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.HRegionServer(3091): Received CLOSE for f567595c518fa426d00b5514ba689075 2024-11-16T08:39:51,005 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.HRegionServer(959): stopping server c27dd56784bd,39489,1731746332792 2024-11-16T08:39:51,005 INFO [RS:0;c27dd56784bd:39489 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T08:39:51,005 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 41fb973b9df933ef140b595dbdc61fe8, disabling compactions & flushes 2024-11-16T08:39:51,005 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8. 2024-11-16T08:39:51,005 INFO [RS:0;c27dd56784bd:39489 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c27dd56784bd:39489. 2024-11-16T08:39:51,005 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8. 2024-11-16T08:39:51,005 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8. after waiting 0 ms 2024-11-16T08:39:51,005 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8. 
2024-11-16T08:39:51,005 DEBUG [RS:0;c27dd56784bd:39489 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:39:51,005 DEBUG [RS:0;c27dd56784bd:39489 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:39:51,006 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T08:39:51,006 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T08:39:51,006 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-16T08:39:51,006 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T08:39:51,006 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-16T08:39:51,006 DEBUG [RS:0;c27dd56784bd:39489 {}] regionserver.HRegionServer(1325): Online Regions={41fb973b9df933ef140b595dbdc61fe8=TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8., f567595c518fa426d00b5514ba689075=TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075., 1588230740=hbase:meta,,1.1588230740} 2024-11-16T08:39:51,006 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T08:39:51,006 DEBUG [RS:0;c27dd56784bd:39489 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 41fb973b9df933ef140b595dbdc61fe8, f567595c518fa426d00b5514ba689075 2024-11-16T08:39:51,006 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T08:39:51,006 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T08:39:51,006 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T08:39:51,006 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T08:39:51,006 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/c713c3d933024f2d9d14f25afb30bd44.74ff98a5d3b62c48bc6fc81d8bbb1f03->hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/c713c3d933024f2d9d14f25afb30bd44-top, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/fd7150450bbc47489ad1d46ad67ad842, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/TestLogRolling-testLogRolling=74ff98a5d3b62c48bc6fc81d8bbb1f03-b2ebf9083bcb4cb28b9c9c89572ab81c, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/0404fa91bcbe4c4f86e140161d1d2d51, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/2a15c5dd81954bc5a87d586f226a2479, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/9cd7bde69cf64852bcfa4a4a1b457162, 
hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/60a0e6f6c27841c5b3c82d633c4c065c, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/348c55e3dcd743e4992328fe5386c199, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/99b7f0dbd9a24ab4a75eed460b3bcd5b, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/cc7036ca87cb41899617b1b9734a954a, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/bbd1d55de1c44da49cff49f2557eb537, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/2a2b4081366b4ad9ae81f4b74707dbc3, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/d23b28b39cda4b13bd71cf583e51d865, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/05a8d3d30060426e88ce7925857aab9b, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/85c0cb44c1554e6fa2958fcfeaee6642, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/54744597680c43b2be3c522fac7d9c17, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/7ebe4d93110f49fe9a192ad1419b4070, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/4263854d2fef497d875fbb7861b199a3, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/a4d12b163b2947b69b7aa6f6e2aca5d1, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/d2879056844b4dcfa1dd71d558612b55, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/037d180a2423428f8eb6960a5c900978, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/919b0f25571d43b0aa1205dc02865bc1, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/1149c2a7ee7c4416ab081c2354e20853, 
hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/cd3999e21523448c8fd618fae2927754, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/bff5e61a12c246758bcc0cb55c13ec2d, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/bb6c6021eea84a53b21f8ef9cb8688dd] to archive 2024-11-16T08:39:51,007 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-16T08:39:51,008 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/c713c3d933024f2d9d14f25afb30bd44.74ff98a5d3b62c48bc6fc81d8bbb1f03 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/c713c3d933024f2d9d14f25afb30bd44.74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:39:51,010 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/fd7150450bbc47489ad1d46ad67ad842 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/fd7150450bbc47489ad1d46ad67ad842 2024-11-16T08:39:51,010 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-16T08:39:51,011 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T08:39:51,011 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T08:39:51,011 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731746391006Running coprocessor pre-close hooks at 1731746391006Disabling compacts and flushes for region at 1731746391006Disabling writes for close at 1731746391006Writing region close event to WAL at 1731746391007 (+1 ms)Running coprocessor post-close hooks at 1731746391011 (+4 ms)Closed at 1731746391011 2024-11-16T08:39:51,011 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/TestLogRolling-testLogRolling=74ff98a5d3b62c48bc6fc81d8bbb1f03-b2ebf9083bcb4cb28b9c9c89572ab81c to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/TestLogRolling-testLogRolling=74ff98a5d3b62c48bc6fc81d8bbb1f03-b2ebf9083bcb4cb28b9c9c89572ab81c 2024-11-16T08:39:51,011 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T08:39:51,012 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/0404fa91bcbe4c4f86e140161d1d2d51 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/0404fa91bcbe4c4f86e140161d1d2d51 2024-11-16T08:39:51,013 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/2a15c5dd81954bc5a87d586f226a2479 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/2a15c5dd81954bc5a87d586f226a2479 2024-11-16T08:39:51,014 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/9cd7bde69cf64852bcfa4a4a1b457162 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/9cd7bde69cf64852bcfa4a4a1b457162 2024-11-16T08:39:51,015 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/60a0e6f6c27841c5b3c82d633c4c065c to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/60a0e6f6c27841c5b3c82d633c4c065c 2024-11-16T08:39:51,016 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/348c55e3dcd743e4992328fe5386c199 to 
hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/348c55e3dcd743e4992328fe5386c199 2024-11-16T08:39:51,018 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/99b7f0dbd9a24ab4a75eed460b3bcd5b to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/99b7f0dbd9a24ab4a75eed460b3bcd5b 2024-11-16T08:39:51,019 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/cc7036ca87cb41899617b1b9734a954a to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/cc7036ca87cb41899617b1b9734a954a 2024-11-16T08:39:51,020 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/bbd1d55de1c44da49cff49f2557eb537 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/bbd1d55de1c44da49cff49f2557eb537 2024-11-16T08:39:51,021 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/2a2b4081366b4ad9ae81f4b74707dbc3 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/2a2b4081366b4ad9ae81f4b74707dbc3 2024-11-16T08:39:51,022 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/d23b28b39cda4b13bd71cf583e51d865 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/d23b28b39cda4b13bd71cf583e51d865 2024-11-16T08:39:51,023 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/05a8d3d30060426e88ce7925857aab9b to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/05a8d3d30060426e88ce7925857aab9b 2024-11-16T08:39:51,024 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/85c0cb44c1554e6fa2958fcfeaee6642 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/85c0cb44c1554e6fa2958fcfeaee6642 2024-11-16T08:39:51,025 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/54744597680c43b2be3c522fac7d9c17 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/54744597680c43b2be3c522fac7d9c17 2024-11-16T08:39:51,026 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/7ebe4d93110f49fe9a192ad1419b4070 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/7ebe4d93110f49fe9a192ad1419b4070 2024-11-16T08:39:51,026 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/4263854d2fef497d875fbb7861b199a3 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/4263854d2fef497d875fbb7861b199a3 2024-11-16T08:39:51,027 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/a4d12b163b2947b69b7aa6f6e2aca5d1 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/a4d12b163b2947b69b7aa6f6e2aca5d1 2024-11-16T08:39:51,028 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/d2879056844b4dcfa1dd71d558612b55 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/d2879056844b4dcfa1dd71d558612b55 2024-11-16T08:39:51,029 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/037d180a2423428f8eb6960a5c900978 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/037d180a2423428f8eb6960a5c900978 2024-11-16T08:39:51,030 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/919b0f25571d43b0aa1205dc02865bc1 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/919b0f25571d43b0aa1205dc02865bc1 2024-11-16T08:39:51,031 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/1149c2a7ee7c4416ab081c2354e20853 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/1149c2a7ee7c4416ab081c2354e20853 2024-11-16T08:39:51,032 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/cd3999e21523448c8fd618fae2927754 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/cd3999e21523448c8fd618fae2927754 2024-11-16T08:39:51,033 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/bff5e61a12c246758bcc0cb55c13ec2d to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/bff5e61a12c246758bcc0cb55c13ec2d 2024-11-16T08:39:51,033 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/bb6c6021eea84a53b21f8ef9cb8688dd to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/info/bb6c6021eea84a53b21f8ef9cb8688dd 2024-11-16T08:39:51,034 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c27dd56784bd:41263 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-16T08:39:51,034 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [fd7150450bbc47489ad1d46ad67ad842=8359, 0404fa91bcbe4c4f86e140161d1d2d51=12509, 2a15c5dd81954bc5a87d586f226a2479=31106, 9cd7bde69cf64852bcfa4a4a1b457162=20064, 60a0e6f6c27841c5b3c82d633c4c065c=17896, 348c55e3dcd743e4992328fe5386c199=59266, 99b7f0dbd9a24ab4a75eed460b3bcd5b=20078, cc7036ca87cb41899617b1b9734a954a=16828, bbd1d55de1c44da49cff49f2557eb537=78909, 2a2b4081366b4ad9ae81f4b74707dbc3=12516, d23b28b39cda4b13bd71cf583e51d865=21156, 05a8d3d30060426e88ce7925857aab9b=111359, 85c0cb44c1554e6fa2958fcfeaee6642=21156, 54744597680c43b2be3c522fac7d9c17=12516, 7ebe4d93110f49fe9a192ad1419b4070=130997, 4263854d2fef497d875fbb7861b199a3=16828, a4d12b163b2947b69b7aa6f6e2aca5d1=17906, d2879056844b4dcfa1dd71d558612b55=151658, 037d180a2423428f8eb6960a5c900978=12520, 919b0f25571d43b0aa1205dc02865bc1=20092, 1149c2a7ee7c4416ab081c2354e20853=183059, cd3999e21523448c8fd618fae2927754=21171, bff5e61a12c246758bcc0cb55c13ec2d=19013, bb6c6021eea84a53b21f8ef9cb8688dd=16839] 2024-11-16T08:39:51,038 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8/recovered.edits/343.seqid, newMaxSeqId=343, maxSeqId=88 2024-11-16T08:39:51,038 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8. 2024-11-16T08:39:51,038 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 41fb973b9df933ef140b595dbdc61fe8: Waiting for close lock at 1731746391005Running coprocessor pre-close hooks at 1731746391005Disabling compacts and flushes for region at 1731746391005Disabling writes for close at 1731746391005Writing region close event to WAL at 1731746391034 (+29 ms)Running coprocessor post-close hooks at 1731746391038 (+4 ms)Closed at 1731746391038 2024-11-16T08:39:51,038 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731746356377.41fb973b9df933ef140b595dbdc61fe8. 
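Note: the WARN above ("Failed to report archival of files: [...]") lists every compacted store file the region server could not report to the master before its RPC client was stopped, as name=size pairs. When triaging such a log it can help to pull those pairs out and total the bytes involved. The sketch below does that with a plain regex; the class name and pattern are illustrative assumptions, not HBase code.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Illustrative helper (not an HBase class): extracts the "name=size" pairs from a
// "Failed to report archival of files: [...]" warning and totals the sizes.
public class ArchivalReportParser {
    private static final Pattern LIST =
        Pattern.compile("Failed to report archival of files: \\[(.*)\\]");

    public static Map<String, Long> parse(String logLine) {
        Map<String, Long> sizes = new LinkedHashMap<>();
        Matcher m = LIST.matcher(logLine);
        if (!m.find()) {
            return sizes; // not an archival-report warning
        }
        for (String pair : m.group(1).split(",\\s*")) {
            String[] kv = pair.split("=", 2);
            if (kv.length == 2) {
                sizes.put(kv[0].trim(), Long.parseLong(kv[1].trim()));
            }
        }
        return sizes;
    }

    public static void main(String[] args) {
        // Shortened example taken from the warning above.
        String line = "regionserver.HStore(2414): Failed to report archival of files: "
            + "[fd7150450bbc47489ad1d46ad67ad842=8359, 0404fa91bcbe4c4f86e140161d1d2d51=12509]";
        Map<String, Long> sizes = parse(line);
        long totalBytes = sizes.values().stream().mapToLong(Long::longValue).sum();
        System.out.println(sizes.size() + " files, " + totalBytes + " bytes"); // 2 files, 20868 bytes
    }
}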
2024-11-16T08:39:51,038 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f567595c518fa426d00b5514ba689075, disabling compactions & flushes 2024-11-16T08:39:51,038 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075. 2024-11-16T08:39:51,039 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075. 2024-11-16T08:39:51,039 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075. after waiting 0 ms 2024-11-16T08:39:51,039 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075. 2024-11-16T08:39:51,039 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/f567595c518fa426d00b5514ba689075/info/c713c3d933024f2d9d14f25afb30bd44.74ff98a5d3b62c48bc6fc81d8bbb1f03->hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/74ff98a5d3b62c48bc6fc81d8bbb1f03/info/c713c3d933024f2d9d14f25afb30bd44-bottom] to archive 2024-11-16T08:39:51,040 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-16T08:39:51,041 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/f567595c518fa426d00b5514ba689075/info/c713c3d933024f2d9d14f25afb30bd44.74ff98a5d3b62c48bc6fc81d8bbb1f03 to hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/archive/data/default/TestLogRolling-testLogRolling/f567595c518fa426d00b5514ba689075/info/c713c3d933024f2d9d14f25afb30bd44.74ff98a5d3b62c48bc6fc81d8bbb1f03 2024-11-16T08:39:51,041 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-16T08:39:51,044 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/data/default/TestLogRolling-testLogRolling/f567595c518fa426d00b5514ba689075/recovered.edits/93.seqid, newMaxSeqId=93, maxSeqId=88 2024-11-16T08:39:51,045 INFO [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075. 
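Note: the HFileArchiver entries above move each compacted store file from the region's data directory to the matching location under archive/. Assuming the first "/data/" segment marks the HBase data root (which holds for the paths in this log), the mapping can be reproduced with simple string manipulation. The helper below is a sketch for reading these lines, not HBase's actual HFileArchiver API.

// Illustrative only: mirrors the data/ -> archive/data/ mapping visible in the
// HFileArchiver log entries; HBase itself resolves these paths through its own APIs.
public class ArchivePathMapper {
    /**
     * Given a store file path under ".../data/<ns>/<table>/<region>/<family>/<file>",
     * return the corresponding path under ".../archive/data/...".
     */
    public static String toArchivePath(String storeFilePath) {
        int idx = storeFilePath.indexOf("/data/");
        if (idx < 0) {
            throw new IllegalArgumentException("Not a store file path: " + storeFilePath);
        }
        return storeFilePath.substring(0, idx) + "/archive" + storeFilePath.substring(idx);
    }

    public static void main(String[] args) {
        // Source path taken from one of the archival entries above.
        String src = "hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4"
            + "/data/default/TestLogRolling-testLogRolling/41fb973b9df933ef140b595dbdc61fe8"
            + "/info/fd7150450bbc47489ad1d46ad67ad842";
        // Prints the same .../archive/data/default/... destination the log reports.
        System.out.println(toArchivePath(src));
    }
}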
2024-11-16T08:39:51,045 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f567595c518fa426d00b5514ba689075: Waiting for close lock at 1731746391038Running coprocessor pre-close hooks at 1731746391038Disabling compacts and flushes for region at 1731746391038Disabling writes for close at 1731746391039 (+1 ms)Writing region close event to WAL at 1731746391041 (+2 ms)Running coprocessor post-close hooks at 1731746391045 (+4 ms)Closed at 1731746391045 2024-11-16T08:39:51,045 DEBUG [RS_CLOSE_REGION-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731746356377.f567595c518fa426d00b5514ba689075. 2024-11-16T08:39:51,206 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.HRegionServer(976): stopping server c27dd56784bd,39489,1731746332792; all regions closed. 2024-11-16T08:39:51,207 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:51,207 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:51,207 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:51,207 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:51,208 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:51,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741834_1010 (size=8107) 2024-11-16T08:39:51,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741834_1010 (size=8107) 2024-11-16T08:39:51,215 DEBUG [RS:0;c27dd56784bd:39489 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/oldWALs 2024-11-16T08:39:51,215 INFO [RS:0;c27dd56784bd:39489 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c27dd56784bd%2C39489%2C1731746332792.meta:.meta(num 1731746333748) 2024-11-16T08:39:51,216 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:51,216 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:51,217 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:51,217 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:51,217 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:51,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741881_1057 (size=778) 2024-11-16T08:39:51,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741881_1057 (size=778) 2024-11-16T08:39:51,223 DEBUG [RS:0;c27dd56784bd:39489 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/oldWALs 2024-11-16T08:39:51,223 INFO [RS:0;c27dd56784bd:39489 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c27dd56784bd%2C39489%2C1731746332792:(num 1731746390837) 2024-11-16T08:39:51,223 DEBUG [RS:0;c27dd56784bd:39489 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:39:51,223 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T08:39:51,223 INFO [RS:0;c27dd56784bd:39489 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T08:39:51,224 INFO [RS:0;c27dd56784bd:39489 {}] hbase.ChoreService(370): Chore service for: 
regionserver/c27dd56784bd:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-16T08:39:51,224 INFO [RS:0;c27dd56784bd:39489 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T08:39:51,224 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T08:39:51,224 INFO [RS:0;c27dd56784bd:39489 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:39489 2024-11-16T08:39:51,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T08:39:51,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39489-0x10142cbfb4f0001, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c27dd56784bd,39489,1731746332792 2024-11-16T08:39:51,235 INFO [RS:0;c27dd56784bd:39489 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T08:39:51,246 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c27dd56784bd,39489,1731746332792] 2024-11-16T08:39:51,256 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c27dd56784bd,39489,1731746332792 already deleted, retry=false 2024-11-16T08:39:51,256 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c27dd56784bd,39489,1731746332792 expired; onlineServers=0 2024-11-16T08:39:51,256 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c27dd56784bd,41263,1731746332596' ***** 2024-11-16T08:39:51,256 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T08:39:51,256 INFO [M:0;c27dd56784bd:41263 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T08:39:51,256 INFO [M:0;c27dd56784bd:41263 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T08:39:51,256 DEBUG [M:0;c27dd56784bd:41263 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T08:39:51,257 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
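Note: each "Region close journal" line above concatenates the close steps as "<step> at <epochMillis>", with the elapsed time since the previous step appended as "(+N ms)". A small parser makes the per-step timings easier to read; the class, record, and regex below are assumptions for illustration, not part of HBase.

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Illustrative helper (not part of HBase): splits a region close journal string into steps.
public class CloseJournalParser {
    private static final Pattern STEP =
        Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

    public record Step(String description, long epochMillis, long deltaMillis) {}

    public static List<Step> parse(String journal) {
        List<Step> steps = new ArrayList<>();
        Matcher m = STEP.matcher(journal);
        while (m.find()) {
            long delta = m.group(3) == null ? 0L : Long.parseLong(m.group(3));
            steps.add(new Step(m.group(1).trim(), Long.parseLong(m.group(2)), delta));
        }
        return steps;
    }

    public static void main(String[] args) {
        // Shortened journal taken from the 41fb973b... close above.
        String journal = "Waiting for close lock at 1731746391005"
            + "Writing region close event to WAL at 1731746391034 (+29 ms)"
            + "Running coprocessor post-close hooks at 1731746391038 (+4 ms)"
            + "Closed at 1731746391038";
        parse(journal).forEach(s ->
            System.out.println(s.description() + " -> +" + s.deltaMillis() + " ms"));
    }
}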
2024-11-16T08:39:51,257 DEBUG [M:0;c27dd56784bd:41263 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T08:39:51,257 DEBUG [master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746333128 {}] cleaner.HFileCleaner(306): Exit Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746333128,5,FailOnTimeoutGroup] 2024-11-16T08:39:51,257 DEBUG [master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746333128 {}] cleaner.HFileCleaner(306): Exit Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746333128,5,FailOnTimeoutGroup] 2024-11-16T08:39:51,257 INFO [M:0;c27dd56784bd:41263 {}] hbase.ChoreService(370): Chore service for: master/c27dd56784bd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T08:39:51,257 INFO [M:0;c27dd56784bd:41263 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T08:39:51,257 DEBUG [M:0;c27dd56784bd:41263 {}] master.HMaster(1795): Stopping service threads 2024-11-16T08:39:51,257 INFO [M:0;c27dd56784bd:41263 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T08:39:51,257 INFO [M:0;c27dd56784bd:41263 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T08:39:51,257 INFO [M:0;c27dd56784bd:41263 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T08:39:51,257 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-16T08:39:51,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T08:39:51,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:39:51,267 DEBUG [M:0;c27dd56784bd:41263 {}] zookeeper.ZKUtil(347): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T08:39:51,267 WARN [M:0;c27dd56784bd:41263 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T08:39:51,268 INFO [M:0;c27dd56784bd:41263 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/.lastflushedseqids 2024-11-16T08:39:51,270 INFO [regionserver/c27dd56784bd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T08:39:51,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741882_1058 (size=228) 2024-11-16T08:39:51,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741882_1058 (size=228) 2024-11-16T08:39:51,275 INFO [M:0;c27dd56784bd:41263 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T08:39:51,275 INFO [M:0;c27dd56784bd:41263 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, 
isAbort=false 2024-11-16T08:39:51,275 DEBUG [M:0;c27dd56784bd:41263 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T08:39:51,275 INFO [M:0;c27dd56784bd:41263 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:39:51,275 DEBUG [M:0;c27dd56784bd:41263 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:39:51,275 DEBUG [M:0;c27dd56784bd:41263 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T08:39:51,275 DEBUG [M:0;c27dd56784bd:41263 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:39:51,276 INFO [M:0;c27dd56784bd:41263 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-16T08:39:51,294 DEBUG [M:0;c27dd56784bd:41263 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f385293987124b1586eea11c13dd430d is 82, key is hbase:meta,,1/info:regioninfo/1731746333778/Put/seqid=0 2024-11-16T08:39:51,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741883_1059 (size=5672) 2024-11-16T08:39:51,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741883_1059 (size=5672) 2024-11-16T08:39:51,302 INFO [M:0;c27dd56784bd:41263 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f385293987124b1586eea11c13dd430d 2024-11-16T08:39:51,320 DEBUG [M:0;c27dd56784bd:41263 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a1ae5e7741bb4c798dabb97b02ecb23d is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731746334310/Put/seqid=0 2024-11-16T08:39:51,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741884_1060 (size=7090) 2024-11-16T08:39:51,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741884_1060 (size=7090) 2024-11-16T08:39:51,324 INFO [M:0;c27dd56784bd:41263 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a1ae5e7741bb4c798dabb97b02ecb23d 2024-11-16T08:39:51,328 INFO [M:0;c27dd56784bd:41263 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a1ae5e7741bb4c798dabb97b02ecb23d 2024-11-16T08:39:51,343 DEBUG [M:0;c27dd56784bd:41263 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/afd10beeccd74e4ca2b32b9f63e59957 is 69, key is c27dd56784bd,39489,1731746332792/rs:state/1731746333247/Put/seqid=0 2024-11-16T08:39:51,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39489-0x10142cbfb4f0001, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:39:51,346 INFO [RS:0;c27dd56784bd:39489 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T08:39:51,346 INFO [RS:0;c27dd56784bd:39489 {}] regionserver.HRegionServer(1031): Exiting; stopping=c27dd56784bd,39489,1731746332792; zookeeper connection closed. 2024-11-16T08:39:51,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39489-0x10142cbfb4f0001, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:39:51,346 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7cdb0c29 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7cdb0c29 2024-11-16T08:39:51,347 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T08:39:51,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741885_1061 (size=5156) 2024-11-16T08:39:51,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741885_1061 (size=5156) 2024-11-16T08:39:51,347 INFO [M:0;c27dd56784bd:41263 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/afd10beeccd74e4ca2b32b9f63e59957 2024-11-16T08:39:51,364 DEBUG [M:0;c27dd56784bd:41263 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/726d8631e8a74ee5b44dc16cc92b2b8c is 52, key is load_balancer_on/state:d/1731746333924/Put/seqid=0 2024-11-16T08:39:51,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741886_1062 (size=5056) 2024-11-16T08:39:51,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741886_1062 (size=5056) 2024-11-16T08:39:51,369 INFO [M:0;c27dd56784bd:41263 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/726d8631e8a74ee5b44dc16cc92b2b8c 2024-11-16T08:39:51,373 DEBUG [M:0;c27dd56784bd:41263 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f385293987124b1586eea11c13dd430d as 
hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f385293987124b1586eea11c13dd430d 2024-11-16T08:39:51,378 INFO [M:0;c27dd56784bd:41263 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f385293987124b1586eea11c13dd430d, entries=8, sequenceid=125, filesize=5.5 K 2024-11-16T08:39:51,379 DEBUG [M:0;c27dd56784bd:41263 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a1ae5e7741bb4c798dabb97b02ecb23d as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a1ae5e7741bb4c798dabb97b02ecb23d 2024-11-16T08:39:51,384 INFO [M:0;c27dd56784bd:41263 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a1ae5e7741bb4c798dabb97b02ecb23d 2024-11-16T08:39:51,384 INFO [M:0;c27dd56784bd:41263 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a1ae5e7741bb4c798dabb97b02ecb23d, entries=13, sequenceid=125, filesize=6.9 K 2024-11-16T08:39:51,385 DEBUG [M:0;c27dd56784bd:41263 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/afd10beeccd74e4ca2b32b9f63e59957 as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/afd10beeccd74e4ca2b32b9f63e59957 2024-11-16T08:39:51,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:51,390 INFO [M:0;c27dd56784bd:41263 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/afd10beeccd74e4ca2b32b9f63e59957, entries=1, sequenceid=125, filesize=5.0 K 2024-11-16T08:39:51,391 DEBUG [M:0;c27dd56784bd:41263 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/726d8631e8a74ee5b44dc16cc92b2b8c as hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/726d8631e8a74ee5b44dc16cc92b2b8c 2024-11-16T08:39:51,395 INFO [M:0;c27dd56784bd:41263 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33067/user/jenkins/test-data/1fe965a6-0450-698d-d929-c55c3ba57bc4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/726d8631e8a74ee5b44dc16cc92b2b8c, entries=1, sequenceid=125, filesize=4.9 K 2024-11-16T08:39:51,396 INFO [M:0;c27dd56784bd:41263 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=125, compaction requested=false 2024-11-16T08:39:51,398 INFO [M:0;c27dd56784bd:41263 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:39:51,398 DEBUG [M:0;c27dd56784bd:41263 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731746391275Disabling compacts and flushes for region at 1731746391275Disabling writes for close at 1731746391275Obtaining lock to block concurrent updates at 1731746391276 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731746391276Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1731746391276Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731746391277 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731746391277Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731746391294 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731746391294Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731746391306 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731746391319 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731746391319Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731746391328 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731746391342 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731746391342Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731746391351 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731746391364 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731746391364Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65506d70: reopening flushed file at 1731746391372 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c9cbcb6: reopening flushed file at 1731746391378 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d7e05ec: reopening flushed file at 1731746391384 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43dc92ba: reopening flushed file at 1731746391390 (+6 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=125, compaction requested=false at 1731746391396 (+6 ms)Writing region close event to WAL at 1731746391398 (+2 ms)Closed at 1731746391398 2024-11-16T08:39:51,398 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:51,398 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:51,398 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:51,398 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:51,398 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:51,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42791 is added to blk_1073741830_1006 (size=61320) 2024-11-16T08:39:51,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43469 is added to blk_1073741830_1006 (size=61320) 2024-11-16T08:39:51,401 INFO [M:0;c27dd56784bd:41263 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T08:39:51,401 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T08:39:51,401 INFO [M:0;c27dd56784bd:41263 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41263 2024-11-16T08:39:51,401 INFO [M:0;c27dd56784bd:41263 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T08:39:51,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
...
11 more 2024-11-16T08:39:51,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:39:51,513 INFO [M:0;c27dd56784bd:41263 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T08:39:51,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41263-0x10142cbfb4f0000, quorum=127.0.0.1:62779, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:39:51,516 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@20408ea5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:39:51,516 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2e2ab763{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:39:51,516 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:39:51,516 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f4aa33e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:39:51,516 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47bcda8c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/hadoop.log.dir/,STOPPED} 2024-11-16T08:39:51,518 WARN [BP-1154009425-172.17.0.3-1731746330336 heartbeating to localhost/127.0.0.1:33067 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T08:39:51,518 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
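Note: the "Failed invocation" WARNs from RecoverLeaseFSUtils report "java.lang.reflect.InvocationTargetException: null" because DistributedFileSystem.isFileClosed is called through reflection: whatever the target method throws is wrapped, the wrapper itself carries no message, and the real error (here "java.io.IOException: Filesystem closed", since the mini-cluster's DFS client is already shut down) appears only as the cause. The following minimal, self-contained demonstration of that wrapping uses a throwaway class unrelated to HBase.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Minimal demonstration of why the WARN reads "InvocationTargetException: null":
// reflection wraps whatever the target throws, and the real error is only the cause.
public class ReflectionWrappingDemo {
    public static boolean isFileClosed(String path) throws IOException {
        throw new IOException("Filesystem closed"); // stands in for the closed DFSClient
    }

    public static void main(String[] args) throws Exception {
        Method m = ReflectionWrappingDemo.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(null, "/some/wal");
        } catch (InvocationTargetException e) {
            System.out.println(e.getMessage());            // null, as in the log line
            System.out.println(e.getCause().getMessage()); // Filesystem closed
        }
    }
}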
2024-11-16T08:39:51,518 WARN [BP-1154009425-172.17.0.3-1731746330336 heartbeating to localhost/127.0.0.1:33067 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1154009425-172.17.0.3-1731746330336 (Datanode Uuid 8622081f-7e6d-4622-8803-3a1e0d008227) service to localhost/127.0.0.1:33067 2024-11-16T08:39:51,518 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T08:39:51,519 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/cluster_d29cf617-63ba-fed4-6030-6a3e47b4a9f0/data/data3/current/BP-1154009425-172.17.0.3-1731746330336 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:39:51,519 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/cluster_d29cf617-63ba-fed4-6030-6a3e47b4a9f0/data/data4/current/BP-1154009425-172.17.0.3-1731746330336 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:39:51,519 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T08:39:51,522 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@64358886{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:39:51,522 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@26a9d62d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:39:51,522 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:39:51,522 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@601b78f7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:39:51,522 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23e1642c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/hadoop.log.dir/,STOPPED} 2024-11-16T08:39:51,524 WARN [BP-1154009425-172.17.0.3-1731746330336 heartbeating to localhost/127.0.0.1:33067 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T08:39:51,524 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T08:39:51,524 WARN [BP-1154009425-172.17.0.3-1731746330336 heartbeating to localhost/127.0.0.1:33067 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1154009425-172.17.0.3-1731746330336 (Datanode Uuid e4b9af89-3b71-409a-adbe-3debfbd4f60f) service to localhost/127.0.0.1:33067 2024-11-16T08:39:51,524 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T08:39:51,524 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/cluster_d29cf617-63ba-fed4-6030-6a3e47b4a9f0/data/data1/current/BP-1154009425-172.17.0.3-1731746330336 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:39:51,525 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/cluster_d29cf617-63ba-fed4-6030-6a3e47b4a9f0/data/data2/current/BP-1154009425-172.17.0.3-1731746330336 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:39:51,525 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T08:39:51,532 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b6783f5{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T08:39:51,532 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@44a25975{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:39:51,532 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:39:51,533 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7f1304aa{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:39:51,533 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7193a060{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/hadoop.log.dir/,STOPPED} 2024-11-16T08:39:51,541 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T08:39:51,569 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T08:39:51,577 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=229 (was 208) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:33067 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:33067 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:33067 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33067 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33067 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33067 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33067 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:33067 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=509 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=218 (was 296), ProcessCount=11 (was 11), AvailableMemoryMB=4162 (was 1723) - AvailableMemoryMB LEAK? 
- 2024-11-16T08:39:51,585 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=229, OpenFileDescriptor=509, MaxFileDescriptor=1048576, SystemLoadAverage=218, ProcessCount=11, AvailableMemoryMB=4162 2024-11-16T08:39:51,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-16T08:39:51,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/hadoop.log.dir so I do NOT create it in target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9 2024-11-16T08:39:51,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8570b207-6f5d-45ba-22ff-51efcb0b35f3/hadoop.tmp.dir so I do NOT create it in target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9 2024-11-16T08:39:51,586 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/cluster_cac8dd70-bd51-be84-6a3a-2489de92a254, deleteOnExit=true 2024-11-16T08:39:51,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-16T08:39:51,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/test.cache.data in system properties and HBase conf 2024-11-16T08:39:51,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/hadoop.tmp.dir in system properties and HBase conf 2024-11-16T08:39:51,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/hadoop.log.dir in system properties and HBase conf 2024-11-16T08:39:51,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-16T08:39:51,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-16T08:39:51,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-16T08:39:51,586 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-16T08:39:51,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-16T08:39:51,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-16T08:39:51,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-16T08:39:51,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T08:39:51,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-16T08:39:51,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-16T08:39:51,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-16T08:39:51,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T08:39:51,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-16T08:39:51,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/nfs.dump.dir in system properties and HBase conf 2024-11-16T08:39:51,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/java.io.tmpdir in system properties and HBase conf 2024-11-16T08:39:51,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-16T08:39:51,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-16T08:39:51,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-16T08:39:51,598 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T08:39:51,957 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:39:51,960 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:39:51,964 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:39:51,964 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:39:51,965 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T08:39:51,965 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:39:51,965 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2191d18b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:39:51,966 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e35321a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:39:52,059 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6f9a8217{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/java.io.tmpdir/jetty-localhost-42957-hadoop-hdfs-3_4_1-tests_jar-_-any-11301951308918635926/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T08:39:52,060 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@b72d363{HTTP/1.1, (http/1.1)}{localhost:42957} 2024-11-16T08:39:52,060 INFO [Time-limited test {}] server.Server(415): Started @310296ms 2024-11-16T08:39:52,070 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-16T08:39:52,316 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:39:52,319 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:39:52,319 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:39:52,319 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:39:52,319 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T08:39:52,320 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46761010{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:39:52,320 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b42f247{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:39:52,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:39:52,411 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2f5abbb6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/java.io.tmpdir/jetty-localhost-39635-hadoop-hdfs-3_4_1-tests_jar-_-any-3901699117266410203/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:39:52,411 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7fcd61c6{HTTP/1.1, (http/1.1)}{localhost:39635} 2024-11-16T08:39:52,411 INFO [Time-limited test {}] server.Server(415): Started @310647ms 2024-11-16T08:39:52,412 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:39:52,434 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-16T08:39:52,436 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-16T08:39:52,437 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-16T08:39:52,437 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-16T08:39:52,437 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-16T08:39:52,437 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@624ed4c3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/hadoop.log.dir/,AVAILABLE} 2024-11-16T08:39:52,437 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7acee9f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-16T08:39:52,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:52,529 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1efd209{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/java.io.tmpdir/jetty-localhost-41545-hadoop-hdfs-3_4_1-tests_jar-_-any-11542321648464821350/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:39:52,529 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@49e6dd92{HTTP/1.1, (http/1.1)}{localhost:41545} 2024-11-16T08:39:52,529 INFO [Time-limited test {}] server.Server(415): Started @310765ms 2024-11-16T08:39:52,530 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-16T08:39:53,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:53,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:53,626 WARN [Thread-2489 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/cluster_cac8dd70-bd51-be84-6a3a-2489de92a254/data/data1/current/BP-1292931844-172.17.0.3-1731746391601/current, will proceed with Du for space computation calculation, 2024-11-16T08:39:53,626 WARN [Thread-2490 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/cluster_cac8dd70-bd51-be84-6a3a-2489de92a254/data/data2/current/BP-1292931844-172.17.0.3-1731746391601/current, will proceed with Du for space computation calculation, 2024-11-16T08:39:53,640 WARN [Thread-2453 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T08:39:53,642 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6af284ee7560ab88 with lease ID 0x2b8ab4624a099125: Processing first storage report for DS-00bff00b-70c6-4e50-a602-54eceb9c18b0 from datanode DatanodeRegistration(127.0.0.1:33449, datanodeUuid=43d61ea2-4a73-4293-a679-578846cbf337, infoPort=35539, infoSecurePort=0, ipcPort=32991, storageInfo=lv=-57;cid=testClusterID;nsid=333473052;c=1731746391601) 2024-11-16T08:39:53,642 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6af284ee7560ab88 with lease ID 0x2b8ab4624a099125: from storage DS-00bff00b-70c6-4e50-a602-54eceb9c18b0 node DatanodeRegistration(127.0.0.1:33449, datanodeUuid=43d61ea2-4a73-4293-a679-578846cbf337, infoPort=35539, infoSecurePort=0, ipcPort=32991, storageInfo=lv=-57;cid=testClusterID;nsid=333473052;c=1731746391601), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-16T08:39:53,642 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6af284ee7560ab88 with lease ID 0x2b8ab4624a099125: Processing first storage report for DS-ec0d0514-1910-4b2e-9ec4-2894556a6e03 from datanode DatanodeRegistration(127.0.0.1:33449, datanodeUuid=43d61ea2-4a73-4293-a679-578846cbf337, infoPort=35539, infoSecurePort=0, ipcPort=32991, storageInfo=lv=-57;cid=testClusterID;nsid=333473052;c=1731746391601) 2024-11-16T08:39:53,642 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6af284ee7560ab88 with lease ID 0x2b8ab4624a099125: from storage DS-ec0d0514-1910-4b2e-9ec4-2894556a6e03 node DatanodeRegistration(127.0.0.1:33449, datanodeUuid=43d61ea2-4a73-4293-a679-578846cbf337, infoPort=35539, infoSecurePort=0, ipcPort=32991, storageInfo=lv=-57;cid=testClusterID;nsid=333473052;c=1731746391601), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:39:53,759 WARN [Thread-2500 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/cluster_cac8dd70-bd51-be84-6a3a-2489de92a254/data/data3/current/BP-1292931844-172.17.0.3-1731746391601/current, will proceed with Du for space computation calculation, 2024-11-16T08:39:53,759 WARN [Thread-2501 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/cluster_cac8dd70-bd51-be84-6a3a-2489de92a254/data/data4/current/BP-1292931844-172.17.0.3-1731746391601/current, will proceed with Du for space computation calculation, 2024-11-16T08:39:53,778 WARN [Thread-2476 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-16T08:39:53,780 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfa86c1369c825af5 with lease ID 0x2b8ab4624a099126: Processing first storage report for DS-b1b8f004-fff8-4218-a4db-9ca85ca14476 from datanode DatanodeRegistration(127.0.0.1:35071, datanodeUuid=ba1bdb30-b242-4624-94f8-9cedf766b0f7, infoPort=36863, infoSecurePort=0, ipcPort=34645, storageInfo=lv=-57;cid=testClusterID;nsid=333473052;c=1731746391601) 2024-11-16T08:39:53,780 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfa86c1369c825af5 with lease ID 0x2b8ab4624a099126: from storage DS-b1b8f004-fff8-4218-a4db-9ca85ca14476 node DatanodeRegistration(127.0.0.1:35071, datanodeUuid=ba1bdb30-b242-4624-94f8-9cedf766b0f7, infoPort=36863, infoSecurePort=0, ipcPort=34645, storageInfo=lv=-57;cid=testClusterID;nsid=333473052;c=1731746391601), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:39:53,780 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfa86c1369c825af5 with lease ID 0x2b8ab4624a099126: Processing first storage report for DS-298ede3f-f955-4977-8b95-b745ba70d76e from datanode DatanodeRegistration(127.0.0.1:35071, datanodeUuid=ba1bdb30-b242-4624-94f8-9cedf766b0f7, infoPort=36863, infoSecurePort=0, ipcPort=34645, storageInfo=lv=-57;cid=testClusterID;nsid=333473052;c=1731746391601) 2024-11-16T08:39:53,780 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfa86c1369c825af5 with lease ID 0x2b8ab4624a099126: from storage DS-298ede3f-f955-4977-8b95-b745ba70d76e node DatanodeRegistration(127.0.0.1:35071, datanodeUuid=ba1bdb30-b242-4624-94f8-9cedf766b0f7, infoPort=36863, infoSecurePort=0, ipcPort=34645, storageInfo=lv=-57;cid=testClusterID;nsid=333473052;c=1731746391601), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-16T08:39:53,862 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9 2024-11-16T08:39:53,868 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/cluster_cac8dd70-bd51-be84-6a3a-2489de92a254/zookeeper_0, clientPort=50289, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/cluster_cac8dd70-bd51-be84-6a3a-2489de92a254/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/cluster_cac8dd70-bd51-be84-6a3a-2489de92a254/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-16T08:39:53,869 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50289 2024-11-16T08:39:53,869 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:39:53,870 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:39:53,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33449 is added to blk_1073741825_1001 (size=7) 2024-11-16T08:39:53,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35071 is added to blk_1073741825_1001 (size=7) 2024-11-16T08:39:53,878 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd with version=8 2024-11-16T08:39:53,878 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:46127/user/jenkins/test-data/f0389100-e52c-c2fb-c497-b11fb3fdb42d/hbase-staging 2024-11-16T08:39:53,880 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c27dd56784bd:0 server-side Connection retries=45 2024-11-16T08:39:53,880 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:39:53,880 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T08:39:53,880 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T08:39:53,880 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:39:53,880 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T08:39:53,880 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-16T08:39:53,880 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T08:39:53,881 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41633 2024-11-16T08:39:53,882 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41633 connecting to ZooKeeper ensemble=127.0.0.1:50289 2024-11-16T08:39:53,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:416330x0, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T08:39:53,939 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41633-0x10142cceab60000 connected 2024-11-16T08:39:54,026 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:39:54,029 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:39:54,034 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:39:54,034 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd, hbase.cluster.distributed=false 2024-11-16T08:39:54,037 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T08:39:54,037 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41633 2024-11-16T08:39:54,037 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41633 2024-11-16T08:39:54,038 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41633 2024-11-16T08:39:54,038 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41633 2024-11-16T08:39:54,038 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41633 2024-11-16T08:39:54,050 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c27dd56784bd:0 server-side Connection retries=45 2024-11-16T08:39:54,050 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:39:54,050 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-16T08:39:54,050 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-16T08:39:54,051 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-16T08:39:54,051 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-16T08:39:54,051 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-16T08:39:54,051 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-16T08:39:54,051 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:39265 2024-11-16T08:39:54,052 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39265 connecting to ZooKeeper ensemble=127.0.0.1:50289 2024-11-16T08:39:54,052 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:39:54,054 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:39:54,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:392650x0, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-16T08:39:54,067 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39265-0x10142cceab60001 connected 2024-11-16T08:39:54,067 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:39:54,067 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-16T08:39:54,068 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-16T08:39:54,069 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-16T08:39:54,070 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-16T08:39:54,070 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39265 2024-11-16T08:39:54,070 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39265 2024-11-16T08:39:54,071 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39265 2024-11-16T08:39:54,071 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39265 2024-11-16T08:39:54,071 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39265 
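For orientation: the entries up to this point show HBaseTestingUtil bringing up a single-process mini cluster — a mini DFS, a MiniZooKeeperCluster on client port 50289, a master RPC endpoint on 41633 and a region-server endpoint on 39265, each with its default/priority/replication/metaPriority call queues. A minimal harness that produces this startup sequence would look roughly like the sketch below; HBaseTestingUtil, startMiniCluster() and shutdownMiniCluster() are the standard test-utility API the log itself references, while the class name and test body are hypothetical and not part of this log.

import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Starts an in-process mini DFS, MiniZooKeeperCluster, one HMaster and
    // one region server, producing startup entries like those above.
    util.startMiniCluster();
    try {
      // test logic would go here
    } finally {
      util.shutdownMiniCluster();
    }
  }
}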
2024-11-16T08:39:54,084 DEBUG [M:0;c27dd56784bd:41633 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c27dd56784bd:41633 2024-11-16T08:39:54,084 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c27dd56784bd,41633,1731746393880 2024-11-16T08:39:54,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:39:54,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:39:54,098 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c27dd56784bd,41633,1731746393880 2024-11-16T08:39:54,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:39:54,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-16T08:39:54,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:39:54,110 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-16T08:39:54,110 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c27dd56784bd,41633,1731746393880 from backup master directory 2024-11-16T08:39:54,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c27dd56784bd,41633,1731746393880 2024-11-16T08:39:54,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:39:54,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-16T08:39:54,120 WARN [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
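The NodeCreated, NodeDeleted and NodeChildrenChanged events above are delivered by ZooKeeper watchers that the master and region server register on znodes such as /hbase/master and /hbase/backup-masters. The same mechanism can be observed with the plain ZooKeeper client; in the sketch below the ensemble address and znode path are copied from the log, while the class name and watcher body are hypothetical.

import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class WatchMasterZNodeSketch {
  public static void main(String[] args) throws Exception {
    // Ensemble as reported above; 30s session timeout; no-op default watcher.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:50289", 30_000, event -> { });
    // exists() may be called whether or not the znode is present yet; the
    // watcher fires once on the next create/delete of this path.
    Stat stat = zk.exists("/hbase/master",
        event -> System.out.println("event " + event.getType() + " on " + event.getPath()));
    System.out.println("/hbase/master is currently "
        + (stat == null ? "absent" : "present"));
  }
}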
2024-11-16T08:39:54,120 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c27dd56784bd,41633,1731746393880 2024-11-16T08:39:54,126 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/hbase.id] with ID: 01f5343d-0cd0-435f-a667-23c580aa6c8a 2024-11-16T08:39:54,126 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/.tmp/hbase.id 2024-11-16T08:39:54,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35071 is added to blk_1073741826_1002 (size=42) 2024-11-16T08:39:54,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33449 is added to blk_1073741826_1002 (size=42) 2024-11-16T08:39:54,135 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/.tmp/hbase.id]:[hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/hbase.id] 2024-11-16T08:39:54,153 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:39:54,153 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-16T08:39:54,155 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
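The two FSUtils entries above show the usual HDFS publishing idiom for the cluster ID: write hbase.id to a temporary path first, then rename it into its final location so other processes never observe a half-written file. A hedged sketch of that create-then-rename pattern with the stock Hadoop FileSystem API follows; the paths and the generated UUID are illustrative, not values from this cluster.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteClusterIdSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();     // picks up fs.defaultFS
    FileSystem fs = FileSystem.get(conf);
    Path tmp = new Path("/hbase/.tmp/hbase.id");  // hypothetical locations
    Path dst = new Path("/hbase/hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    }
    // rename() within one HDFS namespace is atomic: readers see either no
    // file or the complete file, never a partial write.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("could not move " + tmp + " to " + dst);
    }
  }
}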
2024-11-16T08:39:54,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:39:54,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:39:54,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33449 is added to blk_1073741827_1003 (size=196) 2024-11-16T08:39:54,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35071 is added to blk_1073741827_1003 (size=196) 2024-11-16T08:39:54,167 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-16T08:39:54,168 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-16T08:39:54,168 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:39:54,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33449 is added to blk_1073741828_1004 (size=1189) 2024-11-16T08:39:54,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35071 is added to blk_1073741828_1004 (size=1189) 2024-11-16T08:39:54,174 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store 2024-11-16T08:39:54,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33449 is added to blk_1073741829_1005 (size=34) 2024-11-16T08:39:54,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35071 is added to blk_1073741829_1005 (size=34) 2024-11-16T08:39:54,180 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:39:54,180 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T08:39:54,180 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:39:54,180 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:39:54,180 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T08:39:54,180 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:39:54,180 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
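The descriptor dumped above for 'master:store' (families info, proc, rs and state with their VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING and BLOCKSIZE attributes) maps directly onto HBase's public table-descriptor builder API. As a sketch only — not the code the master itself runs — the 'info' family as printed would be expressed roughly like this:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the 'info' family attributes in the log: VERSIONS=3,
    // BLOOMFILTER=ROWCOL, IN_MEMORY=true, DATA_BLOCK_ENCODING=ROW_INDEX_V1,
    // BLOCKSIZE=8192.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8192)
        .build();
    // 'proc', 'rs' and 'state' would be added the same way with their own
    // attributes from the log; the table name here is only illustrative.
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .build();
  }
}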
2024-11-16T08:39:54,180 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731746394180Disabling compacts and flushes for region at 1731746394180Disabling writes for close at 1731746394180Writing region close event to WAL at 1731746394180Closed at 1731746394180 2024-11-16T08:39:54,180 WARN [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/.initializing 2024-11-16T08:39:54,180 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/WALs/c27dd56784bd,41633,1731746393880 2024-11-16T08:39:54,182 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C41633%2C1731746393880, suffix=, logDir=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/WALs/c27dd56784bd,41633,1731746393880, archiveDir=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/oldWALs, maxLogs=10 2024-11-16T08:39:54,182 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C41633%2C1731746393880.1731746394182 2024-11-16T08:39:54,186 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/WALs/c27dd56784bd,41633,1731746393880/c27dd56784bd%2C41633%2C1731746393880.1731746394182 2024-11-16T08:39:54,187 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35539:35539),(127.0.0.1/127.0.0.1:36863:36863)] 2024-11-16T08:39:54,188 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-16T08:39:54,188 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:39:54,188 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:39:54,188 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:39:54,189 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:39:54,190 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-16T08:39:54,190 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:39:54,191 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:39:54,191 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:39:54,192 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-16T08:39:54,192 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:39:54,192 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:39:54,192 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:39:54,194 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-16T08:39:54,194 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:39:54,194 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:39:54,194 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:39:54,195 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-16T08:39:54,195 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:39:54,196 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-16T08:39:54,196 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:39:54,197 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:39:54,197 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:39:54,199 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:39:54,199 DEBUG [master/c27dd56784bd:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:39:54,200 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-16T08:39:54,201 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-16T08:39:54,203 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T08:39:54,204 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808156, jitterRate=0.02762460708618164}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-16T08:39:54,204 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731746394188Initializing all the Stores at 1731746394189 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746394189Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746394189Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746394189Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746394189Cleaning up temporary data from old regions at 1731746394199 (+10 ms)Region opened successfully at 1731746394204 (+5 ms) 2024-11-16T08:39:54,204 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-16T08:39:54,207 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e8dd55f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c27dd56784bd/172.17.0.3:0 2024-11-16T08:39:54,208 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-16T08:39:54,208 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-16T08:39:54,209 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-16T08:39:54,209 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-16T08:39:54,209 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-16T08:39:54,210 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-16T08:39:54,210 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-16T08:39:54,212 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-16T08:39:54,213 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-16T08:39:54,225 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-16T08:39:54,225 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-16T08:39:54,226 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-16T08:39:54,235 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-16T08:39:54,236 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-16T08:39:54,238 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-16T08:39:54,246 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-16T08:39:54,248 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-16T08:39:54,256 DEBUG 
[master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-16T08:39:54,261 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-16T08:39:54,267 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-16T08:39:54,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T08:39:54,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-16T08:39:54,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:39:54,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:39:54,278 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c27dd56784bd,41633,1731746393880, sessionid=0x10142cceab60000, setting cluster-up flag (Was=false) 2024-11-16T08:39:54,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:39:54,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:39:54,330 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-16T08:39:54,332 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c27dd56784bd,41633,1731746393880 2024-11-16T08:39:54,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:39:54,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:39:54,383 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-16T08:39:54,385 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c27dd56784bd,41633,1731746393880 2024-11-16T08:39:54,386 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-16T08:39:54,387 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-16T08:39:54,388 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-16T08:39:54,388 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-16T08:39:54,388 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c27dd56784bd,41633,1731746393880 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-16T08:39:54,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:54,389 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:39:54,389 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:39:54,389 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:39:54,389 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c27dd56784bd:0, corePoolSize=5, maxPoolSize=5 2024-11-16T08:39:54,389 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c27dd56784bd:0, corePoolSize=10, maxPoolSize=10 2024-11-16T08:39:54,389 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:39:54,389 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c27dd56784bd:0, corePoolSize=2, maxPoolSize=2 2024-11-16T08:39:54,389 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:39:54,391 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731746424391 2024-11-16T08:39:54,391 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-16T08:39:54,391 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-16T08:39:54,391 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-16T08:39:54,391 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-16T08:39:54,391 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-16T08:39:54,391 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] 
cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-16T08:39:54,391 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:39:54,391 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-16T08:39:54,392 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T08:39:54,392 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-16T08:39:54,392 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-16T08:39:54,392 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-16T08:39:54,392 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-16T08:39:54,392 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-16T08:39:54,392 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746394392,5,FailOnTimeoutGroup] 2024-11-16T08:39:54,392 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:39:54,392 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746394392,5,FailOnTimeoutGroup] 2024-11-16T08:39:54,392 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-16T08:39:54,392 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-16T08:39:54,392 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-16T08:39:54,392 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
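The "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" entries above come from the master's ChoreService scheduling its periodic cleaners (LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner and SnapshotCleaner less often). For orientation, a periodic task is scheduled through the same ScheduledChore/ChoreService classes; the sketch below reuses the ten-minute period from the log, while the chore name and body are hypothetical, and these classes are HBase-internal utilities rather than a stable public API.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ExampleChoreSketch extends ScheduledChore {
  ExampleChoreSketch(Stoppable stopper) {
    // 600000 ms: the LogsCleaner/HFileCleaner period reported above.
    super("ExampleCleaner", stopper, 600_000);
  }

  @Override
  protected void chore() {
    // periodic cleanup work would go here
  }

  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("example");
    service.scheduleChore(new ExampleChoreSketch(stopper));
    // later: stopper.stop("done"); service.shutdown();
  }
}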
2024-11-16T08:39:54,392 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-16T08:39:54,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35071 is added to blk_1073741831_1007 (size=1321) 2024-11-16T08:39:54,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33449 is added to blk_1073741831_1007 (size=1321) 2024-11-16T08:39:54,400 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-16T08:39:54,400 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd 2024-11-16T08:39:54,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35071 is added to blk_1073741832_1008 (size=32) 2024-11-16T08:39:54,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33449 is added to blk_1073741832_1008 (size=32) 2024-11-16T08:39:54,406 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:39:54,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T08:39:54,408 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T08:39:54,408 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:39:54,408 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:39:54,408 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T08:39:54,409 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T08:39:54,409 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:39:54,409 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:39:54,409 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T08:39:54,410 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T08:39:54,410 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:39:54,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:39:54,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T08:39:54,411 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T08:39:54,411 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:39:54,411 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:39:54,412 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T08:39:54,412 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/data/hbase/meta/1588230740 2024-11-16T08:39:54,412 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/data/hbase/meta/1588230740 2024-11-16T08:39:54,413 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T08:39:54,414 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T08:39:54,414 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T08:39:54,415 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T08:39:54,416 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-16T08:39:54,417 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=742101, jitterRate=-0.056370809674263}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T08:39:54,417 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731746394406Initializing all the Stores at 1731746394406Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746394406Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746394407 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746394407Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746394407Cleaning up temporary data from old regions at 1731746394414 (+7 ms)Region opened successfully at 1731746394417 (+3 ms) 2024-11-16T08:39:54,417 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T08:39:54,417 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T08:39:54,417 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T08:39:54,417 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T08:39:54,417 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T08:39:54,418 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T08:39:54,418 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731746394417Disabling compacts and flushes for region at 1731746394417Disabling writes for close at 1731746394417Writing region close event to WAL at 1731746394418 (+1 ms)Closed at 1731746394418 2024-11-16T08:39:54,419 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:39:54,419 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-16T08:39:54,419 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-16T08:39:54,420 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T08:39:54,420 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-16T08:39:54,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:54,475 INFO [RS:0;c27dd56784bd:39265 {}] regionserver.HRegionServer(746): ClusterId : 01f5343d-0cd0-435f-a667-23c580aa6c8a 2024-11-16T08:39:54,475 DEBUG [RS:0;c27dd56784bd:39265 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-16T08:39:54,490 DEBUG [RS:0;c27dd56784bd:39265 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-16T08:39:54,491 DEBUG [RS:0;c27dd56784bd:39265 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-16T08:39:54,501 DEBUG [RS:0;c27dd56784bd:39265 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-16T08:39:54,502 DEBUG [RS:0;c27dd56784bd:39265 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ad9ed20, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c27dd56784bd/172.17.0.3:0 2024-11-16T08:39:54,522 DEBUG [RS:0;c27dd56784bd:39265 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c27dd56784bd:39265 2024-11-16T08:39:54,522 INFO [RS:0;c27dd56784bd:39265 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-16T08:39:54,522 INFO [RS:0;c27dd56784bd:39265 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-16T08:39:54,522 DEBUG [RS:0;c27dd56784bd:39265 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-16T08:39:54,523 INFO [RS:0;c27dd56784bd:39265 {}] regionserver.HRegionServer(2659): reportForDuty to master=c27dd56784bd,41633,1731746393880 with port=39265, startcode=1731746394050 2024-11-16T08:39:54,523 DEBUG [RS:0;c27dd56784bd:39265 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-16T08:39:54,525 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38011, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-16T08:39:54,525 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41633 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c27dd56784bd,39265,1731746394050 2024-11-16T08:39:54,525 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41633 {}] master.ServerManager(517): Registering regionserver=c27dd56784bd,39265,1731746394050 2024-11-16T08:39:54,526 DEBUG [RS:0;c27dd56784bd:39265 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd 2024-11-16T08:39:54,526 DEBUG [RS:0;c27dd56784bd:39265 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35073 2024-11-16T08:39:54,526 DEBUG [RS:0;c27dd56784bd:39265 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-16T08:39:54,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T08:39:54,540 DEBUG [RS:0;c27dd56784bd:39265 {}] zookeeper.ZKUtil(111): regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c27dd56784bd,39265,1731746394050 2024-11-16T08:39:54,540 WARN [RS:0;c27dd56784bd:39265 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-16T08:39:54,540 INFO [RS:0;c27dd56784bd:39265 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:39:54,540 DEBUG [RS:0;c27dd56784bd:39265 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/WALs/c27dd56784bd,39265,1731746394050 2024-11-16T08:39:54,540 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c27dd56784bd,39265,1731746394050] 2024-11-16T08:39:54,543 INFO [RS:0;c27dd56784bd:39265 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-16T08:39:54,544 INFO [RS:0;c27dd56784bd:39265 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-16T08:39:54,545 INFO [RS:0;c27dd56784bd:39265 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-16T08:39:54,545 INFO [RS:0;c27dd56784bd:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-16T08:39:54,545 INFO [RS:0;c27dd56784bd:39265 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-16T08:39:54,545 INFO [RS:0;c27dd56784bd:39265 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-16T08:39:54,545 INFO [RS:0;c27dd56784bd:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-16T08:39:54,546 DEBUG [RS:0;c27dd56784bd:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:39:54,546 DEBUG [RS:0;c27dd56784bd:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:39:54,546 DEBUG [RS:0;c27dd56784bd:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:39:54,546 DEBUG [RS:0;c27dd56784bd:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:39:54,546 DEBUG [RS:0;c27dd56784bd:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:39:54,546 DEBUG [RS:0;c27dd56784bd:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c27dd56784bd:0, corePoolSize=2, maxPoolSize=2 2024-11-16T08:39:54,546 DEBUG [RS:0;c27dd56784bd:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:39:54,546 DEBUG [RS:0;c27dd56784bd:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:39:54,546 DEBUG [RS:0;c27dd56784bd:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:39:54,546 DEBUG [RS:0;c27dd56784bd:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:39:54,546 DEBUG [RS:0;c27dd56784bd:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:39:54,546 DEBUG [RS:0;c27dd56784bd:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c27dd56784bd:0, corePoolSize=1, maxPoolSize=1 2024-11-16T08:39:54,546 DEBUG [RS:0;c27dd56784bd:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c27dd56784bd:0, corePoolSize=3, maxPoolSize=3 2024-11-16T08:39:54,546 DEBUG [RS:0;c27dd56784bd:39265 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c27dd56784bd:0, corePoolSize=3, maxPoolSize=3 2024-11-16T08:39:54,546 INFO [RS:0;c27dd56784bd:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-16T08:39:54,546 INFO [RS:0;c27dd56784bd:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-16T08:39:54,546 INFO [RS:0;c27dd56784bd:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:39:54,546 INFO [RS:0;c27dd56784bd:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-16T08:39:54,546 INFO [RS:0;c27dd56784bd:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-16T08:39:54,546 INFO [RS:0;c27dd56784bd:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,39265,1731746394050-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T08:39:54,558 INFO [RS:0;c27dd56784bd:39265 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-16T08:39:54,559 INFO [RS:0;c27dd56784bd:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,39265,1731746394050-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:39:54,559 INFO [RS:0;c27dd56784bd:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:39:54,559 INFO [RS:0;c27dd56784bd:39265 {}] regionserver.Replication(171): c27dd56784bd,39265,1731746394050 started 2024-11-16T08:39:54,570 INFO [RS:0;c27dd56784bd:39265 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:39:54,571 INFO [RS:0;c27dd56784bd:39265 {}] regionserver.HRegionServer(1482): Serving as c27dd56784bd,39265,1731746394050, RpcServer on c27dd56784bd/172.17.0.3:39265, sessionid=0x10142cceab60001 2024-11-16T08:39:54,571 WARN [c27dd56784bd:41633 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-16T08:39:54,571 DEBUG [RS:0;c27dd56784bd:39265 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-16T08:39:54,571 DEBUG [RS:0;c27dd56784bd:39265 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c27dd56784bd,39265,1731746394050 2024-11-16T08:39:54,571 DEBUG [RS:0;c27dd56784bd:39265 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c27dd56784bd,39265,1731746394050' 2024-11-16T08:39:54,571 DEBUG [RS:0;c27dd56784bd:39265 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-16T08:39:54,571 DEBUG [RS:0;c27dd56784bd:39265 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-16T08:39:54,572 DEBUG [RS:0;c27dd56784bd:39265 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-16T08:39:54,572 DEBUG [RS:0;c27dd56784bd:39265 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-16T08:39:54,572 DEBUG [RS:0;c27dd56784bd:39265 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c27dd56784bd,39265,1731746394050 2024-11-16T08:39:54,572 DEBUG [RS:0;c27dd56784bd:39265 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c27dd56784bd,39265,1731746394050' 2024-11-16T08:39:54,572 DEBUG [RS:0;c27dd56784bd:39265 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-16T08:39:54,572 DEBUG [RS:0;c27dd56784bd:39265 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-16T08:39:54,572 DEBUG [RS:0;c27dd56784bd:39265 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-16T08:39:54,572 INFO [RS:0;c27dd56784bd:39265 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-16T08:39:54,572 INFO [RS:0;c27dd56784bd:39265 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-16T08:39:54,675 INFO [RS:0;c27dd56784bd:39265 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C39265%2C1731746394050, suffix=, logDir=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/WALs/c27dd56784bd,39265,1731746394050, archiveDir=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/oldWALs, maxLogs=32 2024-11-16T08:39:54,676 INFO [RS:0;c27dd56784bd:39265 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C39265%2C1731746394050.1731746394676 2024-11-16T08:39:54,685 INFO [RS:0;c27dd56784bd:39265 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/WALs/c27dd56784bd,39265,1731746394050/c27dd56784bd%2C39265%2C1731746394050.1731746394676 2024-11-16T08:39:54,686 DEBUG [RS:0;c27dd56784bd:39265 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36863:36863),(127.0.0.1/127.0.0.1:35539:35539)] 2024-11-16T08:39:54,821 DEBUG [c27dd56784bd:41633 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-16T08:39:54,822 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c27dd56784bd,39265,1731746394050 2024-11-16T08:39:54,824 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c27dd56784bd,39265,1731746394050, state=OPENING 2024-11-16T08:39:54,835 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-16T08:39:54,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:39:54,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:39:54,848 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:39:54,848 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:39:54,848 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-16T08:39:54,848 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c27dd56784bd,39265,1731746394050}] 2024-11-16T08:39:55,005 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-16T08:39:55,010 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48189, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-16T08:39:55,017 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-16T08:39:55,017 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:39:55,020 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c27dd56784bd%2C39265%2C1731746394050.meta, suffix=.meta, logDir=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/WALs/c27dd56784bd,39265,1731746394050, archiveDir=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/oldWALs, maxLogs=32 2024-11-16T08:39:55,021 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c27dd56784bd%2C39265%2C1731746394050.meta.1731746395020.meta 2024-11-16T08:39:55,025 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/WALs/c27dd56784bd,39265,1731746394050/c27dd56784bd%2C39265%2C1731746394050.meta.1731746395020.meta 2024-11-16T08:39:55,029 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36863:36863),(127.0.0.1/127.0.0.1:35539:35539)] 2024-11-16T08:39:55,033 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-16T08:39:55,033 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-16T08:39:55,033 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-16T08:39:55,033 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-16T08:39:55,033 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-16T08:39:55,033 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-16T08:39:55,033 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-16T08:39:55,033 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-16T08:39:55,035 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-16T08:39:55,036 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-16T08:39:55,036 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:39:55,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:39:55,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-16T08:39:55,037 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-16T08:39:55,037 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:39:55,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:39:55,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-16T08:39:55,037 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-16T08:39:55,038 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:39:55,038 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-16T08:39:55,038 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-16T08:39:55,038 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-16T08:39:55,038 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-16T08:39:55,039 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
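[Editor's illustration, not part of the captured test output.] The store-open records above repeatedly print the same column-family settings for the meta region (VERSIONS => '3', IN_MEMORY => 'true', BLOCKSIZE => '8192', BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOCKCACHE => 'true'). As a minimal sketch only, using the public HBase client API and a hypothetical class name, an equivalent family descriptor could be built like this; the values are copied from the log records, everything else is illustrative:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical example class; builds a family descriptor with the same
    // knobs the log prints for the meta 'info' family.
    public final class MetaInfoFamilySketch {
      public static void main(String[] args) {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                     // VERSIONS => '3'
            .setInMemory(true)                                     // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                                // BLOCKSIZE => '8192'
            .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
            .setBlockCacheEnabled(true)                            // BLOCKCACHE => 'true'
            .build();
        System.out.println(info);  // prints a descriptor string similar to the one logged above
      }
    }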
2024-11-16T08:39:55,039 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-16T08:39:55,039 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/data/hbase/meta/1588230740 2024-11-16T08:39:55,040 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/data/hbase/meta/1588230740 2024-11-16T08:39:55,041 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-16T08:39:55,041 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-16T08:39:55,042 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-16T08:39:55,043 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-16T08:39:55,043 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=857226, jitterRate=0.09001940488815308}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-16T08:39:55,044 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-16T08:39:55,044 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731746395033Writing region info on filesystem at 1731746395033Initializing all the Stores at 1731746395034 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746395034Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746395035 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731746395035Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731746395035Cleaning up temporary data from old regions at 1731746395041 (+6 ms)Running coprocessor post-open hooks at 1731746395044 (+3 ms)Region opened successfully at 1731746395044 2024-11-16T08:39:55,045 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731746395005 2024-11-16T08:39:55,047 DEBUG [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-16T08:39:55,047 INFO [RS_OPEN_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-16T08:39:55,047 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c27dd56784bd,39265,1731746394050 2024-11-16T08:39:55,048 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c27dd56784bd,39265,1731746394050, state=OPEN 2024-11-16T08:39:55,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T08:39:55,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-16T08:39:55,118 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c27dd56784bd,39265,1731746394050 2024-11-16T08:39:55,118 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:39:55,118 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-16T08:39:55,120 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-16T08:39:55,120 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c27dd56784bd,39265,1731746394050 in 270 msec 2024-11-16T08:39:55,122 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-16T08:39:55,122 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 701 msec 2024-11-16T08:39:55,123 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-16T08:39:55,123 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-16T08:39:55,124 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T08:39:55,124 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c27dd56784bd,39265,1731746394050, seqNum=-1] 2024-11-16T08:39:55,124 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T08:39:55,125 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48201, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T08:39:55,130 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 742 msec 2024-11-16T08:39:55,130 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731746395130, completionTime=-1 2024-11-16T08:39:55,130 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-16T08:39:55,130 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-16T08:39:55,132 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-16T08:39:55,132 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731746455132 2024-11-16T08:39:55,132 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731746515132 2024-11-16T08:39:55,132 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-16T08:39:55,133 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,41633,1731746393880-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-16T08:39:55,133 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,41633,1731746393880-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:39:55,133 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,41633,1731746393880-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-16T08:39:55,133 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c27dd56784bd:41633, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T08:39:55,133 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-16T08:39:55,133 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-16T08:39:55,134 DEBUG [master/c27dd56784bd:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-16T08:39:55,136 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.016sec 2024-11-16T08:39:55,136 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-16T08:39:55,136 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-16T08:39:55,137 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-16T08:39:55,137 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-16T08:39:55,137 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-16T08:39:55,137 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,41633,1731746393880-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-16T08:39:55,137 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,41633,1731746393880-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-16T08:39:55,139 DEBUG [master/c27dd56784bd:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-16T08:39:55,139 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-16T08:39:55,139 INFO [master/c27dd56784bd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c27dd56784bd,41633,1731746393880-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-16T08:39:55,174 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@499631cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:39:55,174 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c27dd56784bd,41633,-1 for getting cluster id 2024-11-16T08:39:55,174 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-16T08:39:55,175 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '01f5343d-0cd0-435f-a667-23c580aa6c8a' 2024-11-16T08:39:55,175 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-16T08:39:55,175 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "01f5343d-0cd0-435f-a667-23c580aa6c8a" 2024-11-16T08:39:55,176 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@589e7b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:39:55,176 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c27dd56784bd,41633,-1] 2024-11-16T08:39:55,176 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-16T08:39:55,176 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:39:55,177 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58980, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-16T08:39:55,177 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5734d2cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-16T08:39:55,178 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-16T08:39:55,179 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c27dd56784bd,39265,1731746394050, seqNum=-1] 2024-11-16T08:39:55,179 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-16T08:39:55,180 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49220, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-16T08:39:55,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c27dd56784bd,41633,1731746393880 2024-11-16T08:39:55,182 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-16T08:39:55,184 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-16T08:39:55,184 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-16T08:39:55,187 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/WALs/test.com,8080,1, archiveDir=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/oldWALs, maxLogs=32 2024-11-16T08:39:55,187 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731746395187 2024-11-16T08:39:55,192 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/WALs/test.com,8080,1/test.com%2C8080%2C1.1731746395187 2024-11-16T08:39:55,196 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36863:36863),(127.0.0.1/127.0.0.1:35539:35539)] 2024-11-16T08:39:55,198 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731746395198 2024-11-16T08:39:55,205 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,205 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,205 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,206 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,206 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,206 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/WALs/test.com,8080,1/test.com%2C8080%2C1.1731746395187 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/WALs/test.com,8080,1/test.com%2C8080%2C1.1731746395198 2024-11-16T08:39:55,208 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35539:35539),(127.0.0.1/127.0.0.1:36863:36863)] 2024-11-16T08:39:55,208 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/WALs/test.com,8080,1/test.com%2C8080%2C1.1731746395187 is not closed yet, will try archiving it next time 2024-11-16T08:39:55,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33449 is added to blk_1073741835_1011 (size=93) 2024-11-16T08:39:55,211 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,212 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35071 is added to blk_1073741835_1011 (size=93) 2024-11-16T08:39:55,212 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,212 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,212 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,213 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/WALs/test.com,8080,1/test.com%2C8080%2C1.1731746395187 to hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/oldWALs/test.com%2C8080%2C1.1731746395187 2024-11-16T08:39:55,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35071 is added to blk_1073741836_1012 (size=93) 2024-11-16T08:39:55,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33449 is added to blk_1073741836_1012 (size=93) 2024-11-16T08:39:55,215 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/oldWALs 2024-11-16T08:39:55,215 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731746395198) 2024-11-16T08:39:55,215 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-16T08:39:55,216 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-16T08:39:55,216 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:39:55,216 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:39:55,216 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:39:55,216 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-16T08:39:55,216 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-16T08:39:55,216 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1054820621, stopped=false 2024-11-16T08:39:55,216 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c27dd56784bd,41633,1731746393880 2024-11-16T08:39:55,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T08:39:55,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-16T08:39:55,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:39:55,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:39:55,235 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T08:39:55,236 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-16T08:39:55,236 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:39:55,236 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Set 
watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:39:55,236 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:39:55,236 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-16T08:39:55,236 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c27dd56784bd,39265,1731746394050' ***** 2024-11-16T08:39:55,236 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-16T08:39:55,236 INFO [RS:0;c27dd56784bd:39265 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-16T08:39:55,236 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-16T08:39:55,237 INFO [RS:0;c27dd56784bd:39265 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-16T08:39:55,237 INFO [RS:0;c27dd56784bd:39265 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-16T08:39:55,237 INFO [RS:0;c27dd56784bd:39265 {}] regionserver.HRegionServer(959): stopping server c27dd56784bd,39265,1731746394050 2024-11-16T08:39:55,237 INFO [RS:0;c27dd56784bd:39265 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T08:39:55,237 INFO [RS:0;c27dd56784bd:39265 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c27dd56784bd:39265. 2024-11-16T08:39:55,237 DEBUG [RS:0;c27dd56784bd:39265 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-16T08:39:55,237 DEBUG [RS:0;c27dd56784bd:39265 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:39:55,237 INFO [RS:0;c27dd56784bd:39265 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-16T08:39:55,237 INFO [RS:0;c27dd56784bd:39265 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-16T08:39:55,237 INFO [RS:0;c27dd56784bd:39265 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-16T08:39:55,237 INFO [RS:0;c27dd56784bd:39265 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-16T08:39:55,237 INFO [RS:0;c27dd56784bd:39265 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-16T08:39:55,237 DEBUG [RS:0;c27dd56784bd:39265 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-16T08:39:55,237 DEBUG [RS:0;c27dd56784bd:39265 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-16T08:39:55,237 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-16T08:39:55,237 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-16T08:39:55,237 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-16T08:39:55,237 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-16T08:39:55,237 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-16T08:39:55,238 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-16T08:39:55,252 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/data/hbase/meta/1588230740/.tmp/ns/202ff876db144142816b3873f1ffd4d0 is 43, key is default/ns:d/1731746395126/Put/seqid=0 2024-11-16T08:39:55,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35071 is added to blk_1073741837_1013 (size=5153) 2024-11-16T08:39:55,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33449 is added to blk_1073741837_1013 (size=5153) 2024-11-16T08:39:55,257 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/data/hbase/meta/1588230740/.tmp/ns/202ff876db144142816b3873f1ffd4d0 2024-11-16T08:39:55,263 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/data/hbase/meta/1588230740/.tmp/ns/202ff876db144142816b3873f1ffd4d0 as hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/data/hbase/meta/1588230740/ns/202ff876db144142816b3873f1ffd4d0 2024-11-16T08:39:55,268 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/data/hbase/meta/1588230740/ns/202ff876db144142816b3873f1ffd4d0, entries=2, sequenceid=6, filesize=5.0 K 2024-11-16T08:39:55,269 INFO 
[RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 32ms, sequenceid=6, compaction requested=false 2024-11-16T08:39:55,269 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-16T08:39:55,274 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-16T08:39:55,274 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-16T08:39:55,274 INFO [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-16T08:39:55,274 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731746395237Running coprocessor pre-close hooks at 1731746395237Disabling compacts and flushes for region at 1731746395237Disabling writes for close at 1731746395237Obtaining lock to block concurrent updates at 1731746395238 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731746395238Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731746395238Flushing stores of hbase:meta,,1.1588230740 at 1731746395238Flushing 1588230740/ns: creating writer at 1731746395239 (+1 ms)Flushing 1588230740/ns: appending metadata at 1731746395252 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1731746395252Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@64a6ca5d: reopening flushed file at 1731746395262 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 32ms, sequenceid=6, compaction requested=false at 1731746395269 (+7 ms)Writing region close event to WAL at 1731746395271 (+2 ms)Running coprocessor post-close hooks at 1731746395274 (+3 ms)Closed at 1731746395274 2024-11-16T08:39:55,275 DEBUG [RS_CLOSE_META-regionserver/c27dd56784bd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-16T08:39:55,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,44603,1731746189510/c27dd56784bd%2C44603%2C1731746189510.1731746189804 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-16T08:39:55,438 INFO [RS:0;c27dd56784bd:39265 {}] regionserver.HRegionServer(976): stopping server c27dd56784bd,39265,1731746394050; all regions closed. 
2024-11-16T08:39:55,439 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,439 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,439 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,440 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,440 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33449 is added to blk_1073741834_1010 (size=1152) 2024-11-16T08:39:55,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35071 is added to blk_1073741834_1010 (size=1152) 2024-11-16T08:39:55,449 DEBUG [RS:0;c27dd56784bd:39265 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/oldWALs 2024-11-16T08:39:55,449 INFO [RS:0;c27dd56784bd:39265 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c27dd56784bd%2C39265%2C1731746394050.meta:.meta(num 1731746395020) 2024-11-16T08:39:55,450 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,450 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,450 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,450 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,450 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33449 is added to blk_1073741833_1009 (size=93) 2024-11-16T08:39:55,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35071 is added to blk_1073741833_1009 (size=93) 2024-11-16T08:39:55,454 DEBUG [RS:0;c27dd56784bd:39265 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/oldWALs 2024-11-16T08:39:55,454 INFO [RS:0;c27dd56784bd:39265 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c27dd56784bd%2C39265%2C1731746394050:(num 1731746394676) 2024-11-16T08:39:55,455 DEBUG [RS:0;c27dd56784bd:39265 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-16T08:39:55,455 INFO [RS:0;c27dd56784bd:39265 {}] regionserver.LeaseManager(133): Closed leases 2024-11-16T08:39:55,455 INFO [RS:0;c27dd56784bd:39265 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T08:39:55,455 INFO [RS:0;c27dd56784bd:39265 {}] hbase.ChoreService(370): Chore service for: regionserver/c27dd56784bd:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-16T08:39:55,455 INFO [RS:0;c27dd56784bd:39265 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T08:39:55,455 INFO [regionserver/c27dd56784bd:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-16T08:39:55,455 INFO [RS:0;c27dd56784bd:39265 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:39265 2024-11-16T08:39:55,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34591/user/jenkins/test-data/214d9061-491a-2874-86c5-eac183d4ac58/WALs/c27dd56784bd,34739,1731746188041/c27dd56784bd%2C34739%2C1731746188041.meta.1731746189235.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-16T08:39:55,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-16T08:39:55,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c27dd56784bd,39265,1731746394050 2024-11-16T08:39:55,737 INFO [RS:0;c27dd56784bd:39265 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T08:39:55,825 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c27dd56784bd,39265,1731746394050] 2024-11-16T08:39:55,835 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c27dd56784bd,39265,1731746394050 already deleted, retry=false 2024-11-16T08:39:55,836 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c27dd56784bd,39265,1731746394050 expired; onlineServers=0 2024-11-16T08:39:55,836 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c27dd56784bd,41633,1731746393880' ***** 2024-11-16T08:39:55,836 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-16T08:39:55,836 INFO [M:0;c27dd56784bd:41633 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-16T08:39:55,836 INFO [M:0;c27dd56784bd:41633 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-16T08:39:55,836 DEBUG [M:0;c27dd56784bd:41633 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-16T08:39:55,837 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-16T08:39:55,837 DEBUG [M:0;c27dd56784bd:41633 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-16T08:39:55,837 DEBUG [master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746394392 {}] cleaner.HFileCleaner(306): Exit Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.large.0-1731746394392,5,FailOnTimeoutGroup] 2024-11-16T08:39:55,837 DEBUG [master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746394392 {}] cleaner.HFileCleaner(306): Exit Thread[master/c27dd56784bd:0:becomeActiveMaster-HFileCleaner.small.0-1731746394392,5,FailOnTimeoutGroup] 2024-11-16T08:39:55,837 INFO [M:0;c27dd56784bd:41633 {}] hbase.ChoreService(370): Chore service for: master/c27dd56784bd:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-16T08:39:55,838 INFO [M:0;c27dd56784bd:41633 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-16T08:39:55,838 DEBUG [M:0;c27dd56784bd:41633 {}] master.HMaster(1795): Stopping service threads 2024-11-16T08:39:55,838 INFO [M:0;c27dd56784bd:41633 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-16T08:39:55,838 INFO [M:0;c27dd56784bd:41633 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-16T08:39:55,838 INFO [M:0;c27dd56784bd:41633 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-16T08:39:55,839 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-16T08:39:55,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-16T08:39:55,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-16T08:39:55,846 DEBUG [M:0;c27dd56784bd:41633 {}] zookeeper.ZKUtil(347): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-16T08:39:55,847 WARN [M:0;c27dd56784bd:41633 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-16T08:39:55,848 INFO [M:0;c27dd56784bd:41633 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/.lastflushedseqids 2024-11-16T08:39:55,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33449 is added to blk_1073741838_1014 (size=99) 2024-11-16T08:39:55,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35071 is added to blk_1073741838_1014 (size=99) 2024-11-16T08:39:55,855 INFO [M:0;c27dd56784bd:41633 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-16T08:39:55,855 INFO [M:0;c27dd56784bd:41633 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-16T08:39:55,856 DEBUG [M:0;c27dd56784bd:41633 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-16T08:39:55,856 INFO [M:0;c27dd56784bd:41633 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:39:55,856 DEBUG [M:0;c27dd56784bd:41633 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:39:55,856 DEBUG [M:0;c27dd56784bd:41633 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-16T08:39:55,856 DEBUG [M:0;c27dd56784bd:41633 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-16T08:39:55,856 INFO [M:0;c27dd56784bd:41633 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-16T08:39:55,875 DEBUG [M:0;c27dd56784bd:41633 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6e679a53ee94497d8d3b437722c886a9 is 82, key is hbase:meta,,1/info:regioninfo/1731746395047/Put/seqid=0 2024-11-16T08:39:55,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33449 is added to blk_1073741839_1015 (size=5672) 2024-11-16T08:39:55,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35071 is added to blk_1073741839_1015 (size=5672) 2024-11-16T08:39:55,880 INFO [M:0;c27dd56784bd:41633 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6e679a53ee94497d8d3b437722c886a9 2024-11-16T08:39:55,896 DEBUG [M:0;c27dd56784bd:41633 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/79c5407cc70342778f6cb62cf894cf39 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731746395129/Put/seqid=0 2024-11-16T08:39:55,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33449 is added to blk_1073741840_1016 (size=5275) 2024-11-16T08:39:55,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35071 is added to blk_1073741840_1016 (size=5275) 2024-11-16T08:39:55,901 INFO [M:0;c27dd56784bd:41633 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/79c5407cc70342778f6cb62cf894cf39 2024-11-16T08:39:55,917 DEBUG [M:0;c27dd56784bd:41633 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/48bb077f368541d6a52e83db3729bb9e is 69, key is c27dd56784bd,39265,1731746394050/rs:state/1731746394525/Put/seqid=0 2024-11-16T08:39:55,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35071 is added to blk_1073741841_1017 (size=5156) 2024-11-16T08:39:55,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33449 is added to blk_1073741841_1017 (size=5156) 2024-11-16T08:39:55,922 INFO [M:0;c27dd56784bd:41633 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/48bb077f368541d6a52e83db3729bb9e 2024-11-16T08:39:55,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:39:55,925 INFO [RS:0;c27dd56784bd:39265 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T08:39:55,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39265-0x10142cceab60001, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:39:55,925 INFO [RS:0;c27dd56784bd:39265 {}] regionserver.HRegionServer(1031): Exiting; stopping=c27dd56784bd,39265,1731746394050; zookeeper connection closed. 2024-11-16T08:39:55,926 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3a294c20 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3a294c20 2024-11-16T08:39:55,926 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-16T08:39:55,940 DEBUG [M:0;c27dd56784bd:41633 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9d555abd7da043c7a5110e214b238f0f is 52, key is load_balancer_on/state:d/1731746395183/Put/seqid=0 2024-11-16T08:39:55,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35071 is added to blk_1073741842_1018 (size=5056) 2024-11-16T08:39:55,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33449 is added to blk_1073741842_1018 (size=5056) 2024-11-16T08:39:55,945 INFO [M:0;c27dd56784bd:41633 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9d555abd7da043c7a5110e214b238f0f 2024-11-16T08:39:55,949 DEBUG [M:0;c27dd56784bd:41633 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6e679a53ee94497d8d3b437722c886a9 as hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6e679a53ee94497d8d3b437722c886a9 2024-11-16T08:39:55,952 INFO [M:0;c27dd56784bd:41633 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6e679a53ee94497d8d3b437722c886a9, entries=8, sequenceid=29, filesize=5.5 K 2024-11-16T08:39:55,953 DEBUG [M:0;c27dd56784bd:41633 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/79c5407cc70342778f6cb62cf894cf39 as hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/79c5407cc70342778f6cb62cf894cf39 2024-11-16T08:39:55,957 INFO [M:0;c27dd56784bd:41633 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/79c5407cc70342778f6cb62cf894cf39, entries=3, sequenceid=29, filesize=5.2 K 2024-11-16T08:39:55,958 DEBUG [M:0;c27dd56784bd:41633 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/48bb077f368541d6a52e83db3729bb9e as hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/48bb077f368541d6a52e83db3729bb9e 2024-11-16T08:39:55,962 INFO [M:0;c27dd56784bd:41633 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/48bb077f368541d6a52e83db3729bb9e, entries=1, sequenceid=29, filesize=5.0 K 2024-11-16T08:39:55,963 DEBUG [M:0;c27dd56784bd:41633 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9d555abd7da043c7a5110e214b238f0f as hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9d555abd7da043c7a5110e214b238f0f 2024-11-16T08:39:55,966 INFO [M:0;c27dd56784bd:41633 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35073/user/jenkins/test-data/1e856d24-133f-237e-b9a7-e0cb4c741dcd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9d555abd7da043c7a5110e214b238f0f, entries=1, sequenceid=29, filesize=4.9 K 2024-11-16T08:39:55,967 INFO [M:0;c27dd56784bd:41633 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=29, compaction requested=false 2024-11-16T08:39:55,968 INFO [M:0;c27dd56784bd:41633 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-16T08:39:55,969 DEBUG [M:0;c27dd56784bd:41633 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731746395856Disabling compacts and flushes for region at 1731746395856Disabling writes for close at 1731746395856Obtaining lock to block concurrent updates at 1731746395856Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731746395856Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731746395857 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731746395857Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731746395858 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731746395875 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731746395875Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731746395884 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731746395896 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731746395896Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731746395905 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731746395917 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731746395917Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731746395925 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731746395939 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731746395939Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e0268f7: reopening flushed file at 1731746395948 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f792eac: reopening flushed file at 1731746395952 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f51e81: reopening flushed file at 1731746395957 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43717375: reopening flushed file at 1731746395962 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=29, compaction requested=false at 1731746395967 (+5 ms)Writing region close event to WAL at 1731746395968 (+1 ms)Closed at 1731746395968 2024-11-16T08:39:55,969 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,969 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,969 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,969 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,969 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-16T08:39:55,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35071 is added to blk_1073741830_1006 (size=10311) 2024-11-16T08:39:55,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33449 is added to blk_1073741830_1006 (size=10311) 2024-11-16T08:39:55,971 INFO [M:0;c27dd56784bd:41633 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-16T08:39:55,971 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-16T08:39:55,971 INFO [M:0;c27dd56784bd:41633 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41633 2024-11-16T08:39:55,972 INFO [M:0;c27dd56784bd:41633 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-16T08:39:56,011 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:56,011 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:56,012 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:56,012 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:56,012 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:56,012 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:56,012 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:56,012 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:56,013 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:56,013 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:56,031 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:56,031 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:56,032 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:56,032 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:56,032 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:56,032 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:56,035 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:56,035 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:56,035 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:56,037 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-16T08:39:56,083 INFO [M:0;c27dd56784bd:41633 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-16T08:39:56,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:39:56,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41633-0x10142cceab60000, quorum=127.0.0.1:50289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-16T08:39:56,085 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1efd209{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:39:56,085 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@49e6dd92{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:39:56,085 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:39:56,085 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7acee9f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:39:56,085 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@624ed4c3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/hadoop.log.dir/,STOPPED} 2024-11-16T08:39:56,087 WARN [BP-1292931844-172.17.0.3-1731746391601 heartbeating to localhost/127.0.0.1:35073 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T08:39:56,087 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T08:39:56,087 WARN [BP-1292931844-172.17.0.3-1731746391601 heartbeating to localhost/127.0.0.1:35073 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1292931844-172.17.0.3-1731746391601 (Datanode Uuid ba1bdb30-b242-4624-94f8-9cedf766b0f7) service to localhost/127.0.0.1:35073 2024-11-16T08:39:56,087 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T08:39:56,087 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/cluster_cac8dd70-bd51-be84-6a3a-2489de92a254/data/data3/current/BP-1292931844-172.17.0.3-1731746391601 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:39:56,088 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/cluster_cac8dd70-bd51-be84-6a3a-2489de92a254/data/data4/current/BP-1292931844-172.17.0.3-1731746391601 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:39:56,088 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T08:39:56,090 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2f5abbb6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-16T08:39:56,091 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7fcd61c6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:39:56,091 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:39:56,091 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b42f247{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:39:56,091 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46761010{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/hadoop.log.dir/,STOPPED} 2024-11-16T08:39:56,092 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-16T08:39:56,092 WARN [BP-1292931844-172.17.0.3-1731746391601 heartbeating to localhost/127.0.0.1:35073 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-16T08:39:56,092 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-16T08:39:56,092 WARN [BP-1292931844-172.17.0.3-1731746391601 heartbeating to localhost/127.0.0.1:35073 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1292931844-172.17.0.3-1731746391601 (Datanode Uuid 43d61ea2-4a73-4293-a679-578846cbf337) service to localhost/127.0.0.1:35073 2024-11-16T08:39:56,093 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/cluster_cac8dd70-bd51-be84-6a3a-2489de92a254/data/data1/current/BP-1292931844-172.17.0.3-1731746391601 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:39:56,093 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/cluster_cac8dd70-bd51-be84-6a3a-2489de92a254/data/data2/current/BP-1292931844-172.17.0.3-1731746391601 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-16T08:39:56,094 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-16T08:39:56,104 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6f9a8217{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-16T08:39:56,105 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@b72d363{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-16T08:39:56,105 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-16T08:39:56,105 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e35321a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-16T08:39:56,105 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2191d18b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1a433f33-3b5e-dc84-2573-2fa92a790fd9/hadoop.log.dir/,STOPPED} 2024-11-16T08:39:56,110 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-16T08:39:56,123 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-16T08:39:56,130 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=266 (was 229) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35073
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:35073 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: LeaseRenewer:jenkins@localhost:35073
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:35073 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35073
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:35073
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (1496589199) connection to localhost/127.0.0.1:35073 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35073
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

- Thread LEAK? -, OpenFileDescriptor=532 (was 509) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=208 (was 218), ProcessCount=11 (was 11), AvailableMemoryMB=4159 (was 4162)